Lines Matching +full:a +full:- +full:m

1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
9 * The Mach Operating System project at Carnegie-Mellon University.
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 /*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
57 * Pittsburgh PA 15213-3890
132 "Number of failed per-page atomic queue state updates");
153 "verify content of freed zero-filled pages");
157 * bogus page -- for I/O to/from partially complete buffers,
172 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
176 static void vm_page_alloc_check(vm_page_t m);
178 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
180 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
181 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
182 static bool vm_page_free_prep(vm_page_t m);
183 static void vm_page_free_toq(vm_page_t m);
185 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object);
186 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
190 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
211 "Per-CPU page cache size");
233 pgcache = &vmd->vmd_pgcache[pool]; in vm_page_init_cache_zones()
234 pgcache->domain = domain; in vm_page_init_cache_zones()
235 pgcache->pool = pool; in vm_page_init_cache_zones()
236 pgcache->zone = uma_zcache_create("vm pgcache", in vm_page_init_cache_zones()
246 vmd->vmd_page_count / 1000; in vm_page_init_cache_zones()
247 uma_zone_set_maxcache(pgcache->zone, cache); in vm_page_init_cache_zones()
264 * size. Must be called before any use of page-size
272 if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0) in vm_set_page_size()
273 panic("vm_set_page_size: page size not a power of two"); in vm_set_page_size()
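
The panic guard above is the standard power-of-two bit test: a nonzero value v is a power of two exactly when v & (v - 1) is zero, because subtracting one flips the lowest set bit and every bit below it. A minimal user-space illustration of the same test (the function name is invented for the example, not taken from the kernel):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * True iff v is a nonzero power of two: v - 1 clears the lowest set
     * bit and sets everything below it, so the AND is 0 only when v had
     * a single bit set.
     */
    static bool
    is_power_of_two(unsigned long v)
    {
        return (v != 0 && (v & (v - 1)) == 0);
    }

    int
    main(void)
    {
        printf("%d %d %d\n", is_power_of_two(4096),
            is_power_of_two(0), is_power_of_two(12288));
        return (0);
    }
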
300 * the kenv and we know it's null-terminated. in vm_page_blacklist_next()
342 vm_page_t m; in vm_page_blacklist_add() local
345 m = vm_phys_paddr_to_vm_page(pa); in vm_page_blacklist_add()
346 if (m == NULL) in vm_page_blacklist_add()
354 vm_domain_freecnt_inc(vmd, -1); in vm_page_blacklist_add()
355 TAILQ_INSERT_TAIL(&blacklist_head, m, plinks.q); in vm_page_blacklist_add()
367 * onto a list for reporting via the vm.page_blacklist sysctl.
386 * Search for a special module named "ram_blacklist". It'll be a
408 *end = ptr + len - 1; in vm_page_blacklist_load()
420 vm_page_t m; in sysctl_vm_page_blacklist() local
429 TAILQ_FOREACH(m, &blacklist_head, plinks.q) { in sysctl_vm_page_blacklist()
431 (uintmax_t)m->phys_addr); in sysctl_vm_page_blacklist()
440 * Initialize a dummy page for use in scans of the specified paging queue.
442 * Nonetheless, it write busies the page as a safety precaution.
449 marker->flags = PG_MARKER; in vm_page_init_marker()
450 marker->a.flags = aflags; in vm_page_init_marker()
451 marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE; in vm_page_init_marker()
452 marker->a.queue = queue; in vm_page_init_marker()
464 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) = in vm_page_domain_init()
466 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) = in vm_page_domain_init()
468 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) = in vm_page_domain_init()
471 &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) = in vm_page_domain_init()
473 vmd->vmd_domain = domain; in vm_page_domain_init()
474 vmd->vmd_page_count = 0; in vm_page_domain_init()
475 vmd->vmd_free_count = 0; in vm_page_domain_init()
476 vmd->vmd_segs = 0; in vm_page_domain_init()
477 vmd->vmd_oom = false; in vm_page_domain_init()
478 vmd->vmd_helper_threads_enabled = true; in vm_page_domain_init()
480 pq = &vmd->vmd_pagequeues[i]; in vm_page_domain_init()
481 TAILQ_INIT(&pq->pq_pl); in vm_page_domain_init()
482 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue", in vm_page_domain_init()
484 pq->pq_pdpages = 0; in vm_page_domain_init()
485 vm_page_init_marker(&vmd->vmd_markers[i], i, 0); in vm_page_domain_init()
487 mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF); in vm_page_domain_init()
488 mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF); in vm_page_domain_init()
489 snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain); in vm_page_domain_init()
492 * inacthead is used to provide FIFO ordering for LRU-bypassing in vm_page_domain_init()
495 vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED); in vm_page_domain_init()
496 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl, in vm_page_domain_init()
497 &vmd->vmd_inacthead, plinks.q); in vm_page_domain_init()
505 vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED); in vm_page_domain_init()
506 vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED); in vm_page_domain_init()
507 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl, in vm_page_domain_init()
508 &vmd->vmd_clock[0], plinks.q); in vm_page_domain_init()
509 TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl, in vm_page_domain_init()
510 &vmd->vmd_clock[1], plinks.q); in vm_page_domain_init()
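
The vmd_inacthead and vmd_clock[] entries initialized above are marker pages: placeholders that a queue scan can leave behind to record its position, so the page-queue lock can be dropped and the scan later resumed from the marker even though the real pages around it may have been freed or requeued in the meantime. Below is a self-contained user-space sketch of that marker technique over a TAILQ; the struct layout and the scan()/work() names are invented for the illustration and are not the kernel's code.

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct page {
        TAILQ_ENTRY(page) link;
        bool marker;                /* placeholder entries carry no data */
        int data;
    };
    TAILQ_HEAD(pagelist, page);

    static struct pagelist pageq = TAILQ_HEAD_INITIALIZER(pageq);
    static pthread_mutex_t pageq_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Walk the queue, periodically dropping the lock.  The on-stack marker
     * keeps our place: real entries may come and go while the lock is
     * released, but the marker stays put because only this thread owns it.
     */
    static void
    scan(void (*work)(struct page *))
    {
        struct page marker = { .marker = true };
        struct page *p, *next;

        pthread_mutex_lock(&pageq_lock);
        for (p = TAILQ_FIRST(&pageq); p != NULL; p = next) {
            if (p->marker) {                /* skip other scans' markers */
                next = TAILQ_NEXT(p, link);
                continue;
            }
            work(p);
            /* Leave the marker behind, then let other threads in. */
            TAILQ_INSERT_AFTER(&pageq, p, &marker, link);
            pthread_mutex_unlock(&pageq_lock);
            /* ... potentially long work without the lock ... */
            pthread_mutex_lock(&pageq_lock);
            next = TAILQ_NEXT(&marker, link);
            TAILQ_REMOVE(&pageq, &marker, link);
        }
        pthread_mutex_unlock(&pageq_lock);
    }
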
514 * Initialize a physical page in preparation for adding it to the free
518 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool) in vm_page_init_page() argument
520 m->object = NULL; in vm_page_init_page()
521 m->ref_count = 0; in vm_page_init_page()
522 m->busy_lock = VPB_FREED; in vm_page_init_page()
523 m->flags = m->a.flags = 0; in vm_page_init_page()
524 m->phys_addr = pa; in vm_page_init_page()
525 m->a.queue = PQ_NONE; in vm_page_init_page()
526 m->psind = 0; in vm_page_init_page()
527 m->segind = segind; in vm_page_init_page()
528 m->order = VM_NFREEORDER; in vm_page_init_page()
529 m->pool = pool; in vm_page_init_page()
530 m->valid = m->dirty = 0; in vm_page_init_page()
531 pmap_page_init(m); in vm_page_init_page()
541 * Reserve an unmapped guard page to trap access to vm_page_array[-1]. in vm_page_array_alloc()
542 * However, because this page is allocated from KVM, out-of-bounds in vm_page_array_alloc()
550 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); in vm_page_array_alloc()
572 vm_page_t m; in vm_page_startup() local
610 new_end -= witness_size; in vm_page_startup()
619 * Allocate a bitmap to indicate that a random physical page in vm_page_startup()
620 * needs to be included in a minidump. in vm_page_startup()
632 vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) - in vm_page_startup()
638 new_end -= vm_page_dump_size; in vm_page_startup()
645 * in a crash dump. When pmap_map() uses the direct map, they are in vm_page_startup()
658 * included in a crash dump. Since the message buffer is accessed in vm_page_startup()
661 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); in vm_page_startup()
678 size = phys_avail[1] - phys_avail[0]; in vm_page_startup()
684 size += phys_avail[i + 1] - phys_avail[i]; in vm_page_startup()
693 size += vm_phys_segs[i].end - vm_phys_segs[i].start; in vm_page_startup()
702 size = high_avail - low_avail; in vm_page_startup()
713 * the overhead of a page structure per page only if vm_page_array is in vm_page_startup()
727 * a page (PAGE_SIZE) without a corresponding in vm_page_startup()
735 high_avail -= PAGE_SIZE; in vm_page_startup()
736 new_end -= PAGE_SIZE; in vm_page_startup()
752 * Include vm_page_array and vm_reserv_array in a crash dump. in vm_page_startup()
785 m = &vm_page_array[ii]; in vm_page_startup()
786 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0, in vm_page_startup()
788 m->flags = PG_FICTITIOUS; in vm_page_startup()
800 startp = seg->start; in vm_page_startup()
802 if (startp >= seg->end) in vm_page_startup()
810 m = vm_phys_seg_paddr_to_vm_page(seg, startp); in vm_page_startup()
811 for (endp = MIN(phys_avail[i], seg->end); in vm_page_startup()
812 startp < endp; startp += PAGE_SIZE, m++) { in vm_page_startup()
813 vm_page_init_page(m, startp, segind, in vm_page_startup()
823 if (seg->end <= phys_avail[i] || in vm_page_startup()
824 seg->start >= phys_avail[i + 1]) in vm_page_startup()
827 startp = MAX(seg->start, phys_avail[i]); in vm_page_startup()
828 endp = MIN(seg->end, phys_avail[i + 1]); in vm_page_startup()
829 pagecount = (u_long)atop(endp - startp); in vm_page_startup()
846 m = vm_phys_seg_paddr_to_vm_page(seg, startp); in vm_page_startup()
847 vm_page_init_page(m, startp, segind, pool); in vm_page_startup()
850 vm_page_init_page(&m[j], in vm_page_startup()
855 vmd = VM_DOMAIN(seg->domain); in vm_page_startup()
857 vm_phys_enqueue_contig(m, pool, pagecount); in vm_page_startup()
861 vmd->vmd_page_count += (u_int)pagecount; in vm_page_startup()
862 vmd->vmd_segs |= 1UL << segind; in vm_page_startup()
888 vm_page_reference(vm_page_t m) in vm_page_reference() argument
891 vm_page_aflag_set(m, PGA_REFERENCED); in vm_page_reference()
902 vm_page_trybusy(vm_page_t m, int allocflags) in vm_page_trybusy() argument
906 return (vm_page_trysbusy(m)); in vm_page_trybusy()
908 return (vm_page_tryxbusy(m)); in vm_page_trybusy()
919 vm_page_tryacquire(vm_page_t m, int allocflags) in vm_page_tryacquire() argument
923 locked = vm_page_trybusy(m, allocflags); in vm_page_tryacquire()
925 vm_page_wire(m); in vm_page_tryacquire()
936 vm_page_busy_acquire(vm_page_t m, int allocflags) in vm_page_busy_acquire() argument
942 * The page-specific object must be cached because page in vm_page_busy_acquire()
944 * re-lock of a different object. in vm_page_busy_acquire()
945 * It is assumed that a reference to the object is already in vm_page_busy_acquire()
948 obj = atomic_load_ptr(&m->object); in vm_page_busy_acquire()
950 if (vm_page_tryacquire(m, allocflags)) in vm_page_busy_acquire()
958 MPASS(locked || vm_page_wired(m)); in vm_page_busy_acquire()
959 if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags, in vm_page_busy_acquire()
964 KASSERT(m->object == obj || m->object == NULL, in vm_page_busy_acquire()
966 m, obj)); in vm_page_busy_acquire()
973 * Downgrade an exclusive busy page into a single shared busy page.
976 vm_page_busy_downgrade(vm_page_t m) in vm_page_busy_downgrade() argument
980 vm_page_assert_xbusied(m); in vm_page_busy_downgrade()
982 x = vm_page_busy_fetch(m); in vm_page_busy_downgrade()
984 if (atomic_fcmpset_rel_int(&m->busy_lock, in vm_page_busy_downgrade()
989 wakeup(m); in vm_page_busy_downgrade()
996 * Attempt to upgrade a single shared busy into an exclusive busy.
999 vm_page_busy_tryupgrade(vm_page_t m) in vm_page_busy_tryupgrade() argument
1003 vm_page_assert_sbusied(m); in vm_page_busy_tryupgrade()
1005 x = vm_page_busy_fetch(m); in vm_page_busy_tryupgrade()
1012 if (!atomic_fcmpset_acq_int(&m->busy_lock, &x, in vm_page_busy_tryupgrade()
1022 * Return a positive value if the page is shared busied, 0 otherwise.
1025 vm_page_sbusied(vm_page_t m) in vm_page_sbusied() argument
1029 x = vm_page_busy_fetch(m); in vm_page_sbusied()
1036 * Shared unbusy a page.
1039 vm_page_sunbusy(vm_page_t m) in vm_page_sunbusy() argument
1043 vm_page_assert_sbusied(m); in vm_page_sunbusy()
1045 x = vm_page_busy_fetch(m); in vm_page_sunbusy()
1050 if (atomic_fcmpset_int(&m->busy_lock, &x, in vm_page_sunbusy()
1051 x - VPB_ONE_SHARER)) in vm_page_sunbusy()
1057 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED)) in vm_page_sunbusy()
1061 wakeup(m); in vm_page_sunbusy()
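
vm_page_sunbusy() above shows the general shape of the busy lock: the whole state (sharer count plus a waiters bit) lives in one word, and every transition is made with a fetch/compare-and-set loop, with wakeup() called only when the word indicates sleepers. The sketch below is a stripped-down user-space rendering of that idea with C11 atomics; the BUSY_* constants are invented for the illustration, only the shared-owner half is modeled, and the kernel additionally encodes an exclusive owner and a freed state in the same word.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define BUSY_WAITERS    0x1u    /* somebody is sleeping on this lock */
    #define BUSY_ONE_SHARER 0x2u    /* sharer count lives above the low bit */
    #define BUSY_UNLOCKED   0x0u

    static _Atomic uint32_t busy = BUSY_UNLOCKED;

    /* Add one shared owner; refuse (rather than sleep) if waiters exist. */
    static bool
    shared_acquire(void)
    {
        uint32_t old = atomic_load(&busy);

        do {
            if ((old & BUSY_WAITERS) != 0)
                return (false);
        } while (!atomic_compare_exchange_weak(&busy, &old,
            old + BUSY_ONE_SHARER));
        return (true);
    }

    /*
     * Drop one shared owner; report whether sleepers should be woken
     * because the lock just became free, mirroring the wakeup() above.
     */
    static bool
    shared_release(void)
    {
        uint32_t old = atomic_load(&busy);

        for (;;) {
            if (old > (BUSY_ONE_SHARER | BUSY_WAITERS)) {
                /* Not the last sharer: just drop the count by one. */
                if (atomic_compare_exchange_weak(&busy, &old,
                    old - BUSY_ONE_SHARER))
                    return (false);
            } else {
                /* Last sharer: clear the word, waiters bit and all. */
                bool waiters = (old & BUSY_WAITERS) != 0;

                if (atomic_compare_exchange_weak(&busy, &old,
                    BUSY_UNLOCKED))
                    return (waiters);
            }
        }
    }
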
1070 * This is used to implement the hard-path of the busying mechanism.
1073 * will not sleep if the page is shared-busy.
1081 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags) in vm_page_busy_sleep() argument
1085 obj = m->object; in vm_page_busy_sleep()
1088 return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags, in vm_page_busy_sleep()
1096 * This is used to implement the hard-path of the busying mechanism.
1099 * will not sleep if the page is shared-busy.
1105 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex, in vm_page_busy_sleep_unlocked() argument
1110 (void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false); in vm_page_busy_sleep_unlocked()
1126 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex, in _vm_page_busy_sleep() argument
1143 if (!vm_page_busied(m)) in _vm_page_busy_sleep()
1147 sleepq_lock(m); in _vm_page_busy_sleep()
1148 x = vm_page_busy_fetch(m); in _vm_page_busy_sleep()
1156 m->object != obj || m->pindex != pindex) { in _vm_page_busy_sleep()
1157 sleepq_release(m); in _vm_page_busy_sleep()
1162 } while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS)); in _vm_page_busy_sleep()
1166 sleepq_add(m, NULL, wmesg, 0, 0); in _vm_page_busy_sleep()
1167 sleepq_wait(m, PVM); in _vm_page_busy_sleep()
1175 * Try to shared busy a page.
1180 vm_page_trysbusy(vm_page_t m) in vm_page_trysbusy() argument
1185 obj = m->object; in vm_page_trysbusy()
1186 x = vm_page_busy_fetch(m); in vm_page_trysbusy()
1196 if (atomic_fcmpset_acq_int(&m->busy_lock, &x, in vm_page_trysbusy()
1202 obj = m->object; in vm_page_trysbusy()
1204 vm_page_sunbusy(m); in vm_page_trysbusy()
1213 * Try to exclusive busy a page.
1218 vm_page_tryxbusy(vm_page_t m) in vm_page_tryxbusy() argument
1222 if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED, in vm_page_tryxbusy()
1226 obj = m->object; in vm_page_tryxbusy()
1228 vm_page_xunbusy(m); in vm_page_tryxbusy()
1235 vm_page_xunbusy_hard_tail(vm_page_t m) in vm_page_xunbusy_hard_tail() argument
1237 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED); in vm_page_xunbusy_hard_tail()
1239 wakeup(m); in vm_page_xunbusy_hard_tail()
1245 * Called when unbusy has failed because there is a waiter.
1248 vm_page_xunbusy_hard(vm_page_t m) in vm_page_xunbusy_hard() argument
1250 vm_page_assert_xbusied(m); in vm_page_xunbusy_hard()
1251 vm_page_xunbusy_hard_tail(m); in vm_page_xunbusy_hard()
1255 vm_page_xunbusy_hard_unchecked(vm_page_t m) in vm_page_xunbusy_hard_unchecked() argument
1257 vm_page_assert_xbusied_unchecked(m); in vm_page_xunbusy_hard_unchecked()
1258 vm_page_xunbusy_hard_tail(m); in vm_page_xunbusy_hard_unchecked()
1262 vm_page_busy_free(vm_page_t m) in vm_page_busy_free() argument
1267 x = atomic_swap_int(&m->busy_lock, VPB_FREED); in vm_page_busy_free()
1269 wakeup(m); in vm_page_busy_free()
1281 for (; count != 0; count--) { in vm_page_unhold_pages()
1290 vm_page_t m; in PHYS_TO_VM_PAGE() local
1293 m = vm_phys_paddr_to_vm_page(pa); in PHYS_TO_VM_PAGE()
1294 if (m == NULL) in PHYS_TO_VM_PAGE()
1295 m = vm_phys_fictitious_to_vm_page(pa); in PHYS_TO_VM_PAGE()
1296 return (m); in PHYS_TO_VM_PAGE()
1301 if (pi >= first_page && (pi - first_page) < vm_page_array_size) { in PHYS_TO_VM_PAGE()
1302 m = &vm_page_array[pi - first_page]; in PHYS_TO_VM_PAGE()
1303 return (m); in PHYS_TO_VM_PAGE()
1314 * Create a fictitious page with the specified physical address and
1315 * memory attribute. The memory attribute is the only machine-
1316 * dependent aspect of a fictitious page that must be initialized.
1321 vm_page_t m; in vm_page_getfake() local
1323 m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO); in vm_page_getfake()
1324 vm_page_initfake(m, paddr, memattr); in vm_page_getfake()
1325 return (m); in vm_page_getfake()
1329 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) in vm_page_initfake() argument
1332 if ((m->flags & PG_FICTITIOUS) != 0) { in vm_page_initfake()
1340 m->phys_addr = paddr; in vm_page_initfake()
1341 m->a.queue = PQ_NONE; in vm_page_initfake()
1343 m->flags = PG_FICTITIOUS; in vm_page_initfake()
1345 m->oflags = VPO_UNMANAGED; in vm_page_initfake()
1346 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; in vm_page_initfake()
1348 m->ref_count = 1; in vm_page_initfake()
1349 pmap_page_init(m); in vm_page_initfake()
1351 pmap_page_set_memattr(m, memattr); in vm_page_initfake()
1357 * Release a fictitious page.
1360 vm_page_putfake(vm_page_t m) in vm_page_putfake() argument
1363 KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m)); in vm_page_putfake()
1364 KASSERT((m->flags & PG_FICTITIOUS) != 0, in vm_page_putfake()
1365 ("vm_page_putfake: bad page %p", m)); in vm_page_putfake()
1366 vm_page_assert_xbusied(m); in vm_page_putfake()
1367 vm_page_busy_free(m); in vm_page_putfake()
1368 uma_zfree(fakepg_zone, m); in vm_page_putfake()
1378 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) in vm_page_updatefake() argument
1381 KASSERT((m->flags & PG_FICTITIOUS) != 0, in vm_page_updatefake()
1382 ("vm_page_updatefake: bad page %p", m)); in vm_page_updatefake()
1383 m->phys_addr = paddr; in vm_page_updatefake()
1384 pmap_page_set_memattr(m, memattr); in vm_page_updatefake()
1390 * Free a page.
1393 vm_page_free(vm_page_t m) in vm_page_free() argument
1396 m->flags &= ~PG_ZERO; in vm_page_free()
1397 vm_page_free_toq(m); in vm_page_free()
1403 * Free a page to the zeroed-pages queue
1406 vm_page_free_zero(vm_page_t m) in vm_page_free_zero() argument
1409 m->flags |= PG_ZERO; in vm_page_free_zero()
1410 vm_page_free_toq(m); in vm_page_free_zero()
1414 * Unbusy and handle the page queueing for a page from a getpages request that
1418 vm_page_readahead_finish(vm_page_t m) in vm_page_readahead_finish() argument
1422 KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m)); in vm_page_readahead_finish()
1430 if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0) in vm_page_readahead_finish()
1431 vm_page_activate(m); in vm_page_readahead_finish()
1433 vm_page_deactivate(m); in vm_page_readahead_finish()
1434 vm_page_xunbusy_unchecked(m); in vm_page_readahead_finish()
1439 * This is intended to be used when reading a page from backing store fails.
1442 vm_page_free_invalid(vm_page_t m) in vm_page_free_invalid() argument
1445 KASSERT(vm_page_none_valid(m), ("page %p is valid", m)); in vm_page_free_invalid()
1446 KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m)); in vm_page_free_invalid()
1447 KASSERT(m->object != NULL, ("page %p has no object", m)); in vm_page_free_invalid()
1448 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_page_free_invalid()
1452 * I/O error, in which case the page was xbusied by a different thread. in vm_page_free_invalid()
1454 vm_page_xbusy_claim(m); in vm_page_free_invalid()
1463 if (vm_page_remove(m)) in vm_page_free_invalid()
1464 vm_page_free(m); in vm_page_free_invalid()
1473 * call is made from the machine-independent layer.
1480 vm_page_dirty_KBI(vm_page_t m) in vm_page_dirty_KBI() argument
1484 KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!")); in vm_page_dirty_KBI()
1485 m->dirty = VM_PAGE_BITS_ALL; in vm_page_dirty_KBI()
1495 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex, in vm_page_insert_lookup() argument
1501 KASSERT(m->object == NULL, in vm_page_insert_lookup()
1502 ("vm_page_insert: page %p already inserted", m)); in vm_page_insert_lookup()
1507 m->object = object; in vm_page_insert_lookup()
1508 m->pindex = pindex; in vm_page_insert_lookup()
1509 m->ref_count |= VPRC_OBJREF; in vm_page_insert_lookup()
1515 error = vm_radix_iter_insert(pages, m); in vm_page_insert_lookup()
1517 error = vm_radix_insert(&object->rtree, m); in vm_page_insert_lookup()
1519 m->object = NULL; in vm_page_insert_lookup()
1520 m->pindex = 0; in vm_page_insert_lookup()
1521 m->ref_count &= ~VPRC_OBJREF; in vm_page_insert_lookup()
1525 vm_page_insert_radixdone(m, object); in vm_page_insert_lookup()
1526 vm_pager_page_inserted(object, m); in vm_page_insert_lookup()
1538 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) in vm_page_insert() argument
1540 return (vm_page_insert_lookup(m, object, pindex, false, NULL)); in vm_page_insert()
1546 * Tries to insert the page "m" into the specified object at offset
1553 vm_page_iter_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex, in vm_page_iter_insert() argument
1556 return (vm_page_insert_lookup(m, object, pindex, true, pages)); in vm_page_iter_insert()
1562 * Complete page "m" insertion into the specified object after the
1568 vm_page_insert_radixdone(vm_page_t m, vm_object_t object) in vm_page_insert_radixdone() argument
1572 KASSERT(object != NULL && m->object == object, in vm_page_insert_radixdone()
1573 ("vm_page_insert_radixdone: page %p has inconsistent object", m)); in vm_page_insert_radixdone()
1574 KASSERT((m->ref_count & VPRC_OBJREF) != 0, in vm_page_insert_radixdone()
1575 ("vm_page_insert_radixdone: page %p is missing object ref", m)); in vm_page_insert_radixdone()
1580 object->resident_page_count++; in vm_page_insert_radixdone()
1585 if (object->resident_page_count == 1 && object->type == OBJT_VNODE) in vm_page_insert_radixdone()
1586 vhold(object->handle); in vm_page_insert_radixdone()
1589 * Since we are inserting a new and possibly dirty page, in vm_page_insert_radixdone()
1592 if (pmap_page_is_write_mapped(m)) in vm_page_insert_radixdone()
1599 * Complete page "m" removal from the specified object after the radix trie
1606 vm_page_remove_radixdone(vm_page_t m) in vm_page_remove_radixdone() argument
1610 vm_page_assert_xbusied(m); in vm_page_remove_radixdone()
1611 object = m->object; in vm_page_remove_radixdone()
1613 KASSERT((m->ref_count & VPRC_OBJREF) != 0, in vm_page_remove_radixdone()
1614 ("page %p is missing its object ref", m)); in vm_page_remove_radixdone()
1617 if ((m->a.flags & PGA_SWAP_FREE) != 0) in vm_page_remove_radixdone()
1618 vm_pager_page_unswapped(m); in vm_page_remove_radixdone()
1620 vm_pager_page_removed(object, m); in vm_page_remove_radixdone()
1621 m->object = NULL; in vm_page_remove_radixdone()
1626 object->resident_page_count--; in vm_page_remove_radixdone()
1631 if (object->resident_page_count == 0 && object->type == OBJT_VNODE) in vm_page_remove_radixdone()
1632 vdrop(object->handle); in vm_page_remove_radixdone()
1643 vm_page_free_object_prep(vm_page_t m) in vm_page_free_object_prep() argument
1645 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) == in vm_page_free_object_prep()
1646 ((m->object->flags & OBJ_UNMANAGED) != 0), in vm_page_free_object_prep()
1648 __func__, m)); in vm_page_free_object_prep()
1649 vm_page_assert_xbusied(m); in vm_page_free_object_prep()
1655 KASSERT((m->flags & PG_FICTITIOUS) != 0 || in vm_page_free_object_prep()
1656 m->ref_count == VPRC_OBJREF, in vm_page_free_object_prep()
1658 __func__, m, m->ref_count)); in vm_page_free_object_prep()
1659 vm_page_remove_radixdone(m); in vm_page_free_object_prep()
1660 m->ref_count -= VPRC_OBJREF; in vm_page_free_object_prep()
1670 vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m) in vm_page_iter_free() argument
1673 vm_page_free_object_prep(m); in vm_page_iter_free()
1674 vm_page_xunbusy(m); in vm_page_iter_free()
1675 m->flags &= ~PG_ZERO; in vm_page_iter_free()
1676 vm_page_free_toq(m); in vm_page_iter_free()
1688 * final ref and the caller does not hold a wire reference it may not
1692 vm_page_remove(vm_page_t m) in vm_page_remove() argument
1696 dropped = vm_page_remove_xbusy(m); in vm_page_remove()
1697 vm_page_xunbusy(m); in vm_page_remove()
1709 vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m) in vm_page_iter_remove() argument
1714 vm_page_remove_radixdone(m); in vm_page_iter_remove()
1715 dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF); in vm_page_iter_remove()
1716 vm_page_xunbusy(m); in vm_page_iter_remove()
1727 vm_page_radix_remove(vm_page_t m) in vm_page_radix_remove() argument
1731 mrem = vm_radix_remove(&m->object->rtree, m->pindex); in vm_page_radix_remove()
1732 KASSERT(mrem == m, in vm_page_radix_remove()
1733 ("removed page %p, expected page %p", mrem, m)); in vm_page_radix_remove()
1743 vm_page_remove_xbusy(vm_page_t m) in vm_page_remove_xbusy() argument
1746 vm_page_radix_remove(m); in vm_page_remove_xbusy()
1747 vm_page_remove_radixdone(m); in vm_page_remove_xbusy()
1748 return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF); in vm_page_remove_xbusy()
1764 return (vm_radix_lookup(&object->rtree, pindex)); in vm_page_lookup()
1776 vm_radix_iter_init(pages, &object->rtree); in vm_page_iter_init()
1789 vm_radix_iter_limit_init(pages, &object->rtree, limit); in vm_page_iter_limit_init()
1804 return (vm_radix_lookup_unlocked(&object->rtree, pindex)); in vm_page_lookup_unlocked()
1810 * Returns a page that must already have been busied by
1816 vm_page_t m; in vm_page_relookup() local
1818 m = vm_page_lookup_unlocked(object, pindex); in vm_page_relookup()
1819 KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) && in vm_page_relookup()
1820 m->object == object && m->pindex == pindex, in vm_page_relookup()
1821 ("vm_page_relookup: Invalid page %p", m)); in vm_page_relookup()
1822 return (m); in vm_page_relookup()
1827 * incorrect acquires. The page may have been freed after we acquired a
1832 vm_page_busy_release(vm_page_t m) in vm_page_busy_release() argument
1836 x = vm_page_busy_fetch(m); in vm_page_busy_release()
1841 if (atomic_fcmpset_int(&m->busy_lock, &x, in vm_page_busy_release()
1842 x - VPB_ONE_SHARER)) in vm_page_busy_release()
1848 ("vm_page_busy_release: %p xbusy not owned.", m)); in vm_page_busy_release()
1849 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED)) in vm_page_busy_release()
1852 wakeup(m); in vm_page_busy_release()
1858 * Uses the page mnew as a replacement for an existing page at index
1864 * A return value of true means mold is now free. If this is not the
1865 * final ref and the caller does not hold a wire reference it may not
1877 KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0, in vm_page_replace_hold()
1886 mnew->object = object; in vm_page_replace_hold()
1887 mnew->pindex = pindex; in vm_page_replace_hold()
1888 atomic_set_int(&mnew->ref_count, VPRC_OBJREF); in vm_page_replace_hold()
1889 mret = vm_radix_replace(&object->rtree, mnew); in vm_page_replace_hold()
1892 KASSERT((mold->oflags & VPO_UNMANAGED) == in vm_page_replace_hold()
1893 (mnew->oflags & VPO_UNMANAGED), in vm_page_replace_hold()
1896 mold->object = NULL; in vm_page_replace_hold()
1925 * Tries to move the specified page from its current object to a new object
1928 * was aborted due to a failed memory allocation.
1930 * Panics if a page already resides in the new object at the new pindex.
1940 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m, in vm_page_iter_rename() argument
1945 KASSERT((m->ref_count & VPRC_OBJREF) != 0, in vm_page_iter_rename()
1946 ("%s: page %p is missing object ref", __func__, m)); in vm_page_iter_rename()
1947 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_page_iter_rename()
1951 * Create a custom version of vm_page_insert() which does not depend in vm_page_iter_rename()
1955 opidx = m->pindex; in vm_page_iter_rename()
1956 m->pindex = new_pindex; in vm_page_iter_rename()
1957 if (vm_radix_insert(&new_object->rtree, m) != 0) { in vm_page_iter_rename()
1958 m->pindex = opidx; in vm_page_iter_rename()
1965 m->pindex = opidx; in vm_page_iter_rename()
1967 vm_page_remove_radixdone(m); in vm_page_iter_rename()
1970 m->pindex = new_pindex; in vm_page_iter_rename()
1971 m->object = new_object; in vm_page_iter_rename()
1973 vm_page_insert_radixdone(m, new_object); in vm_page_iter_rename()
1974 if (vm_page_any_valid(m)) in vm_page_iter_rename()
1975 vm_page_dirty(m); in vm_page_iter_rename()
1976 vm_pager_page_inserted(new_object, m); in vm_page_iter_rename()
1983 * Allocate and return a page that is associated with the specified
1990 * VM_ALLOC_SYSTEM system *really* needs a page
1997 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
2003 * VM_ALLOC_ZERO prefer a zeroed page
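
A typical caller combines these request flags and retries after waiting when no page is available. The fragment below is only a sketch of that common convention, with obj and pidx as placeholders; exact entry points and busying behavior vary between FreeBSD versions, and VM_ALLOC_ZERO only expresses a preference, so the caller still checks PG_ZERO.

    vm_page_t m;

    VM_OBJECT_WLOCK(obj);
    while ((m = vm_page_alloc(obj, pidx,
        VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
        /* No page available: drop the object lock, let the page daemon
         * reclaim memory, then retry the allocation. */
        VM_OBJECT_WUNLOCK(obj);
        vm_wait(obj);
        VM_OBJECT_WLOCK(obj);
    }
    if ((m->flags & PG_ZERO) == 0)
        pmap_zero_page(m);      /* VM_ALLOC_ZERO is only a preference */
    vm_page_xunbusy(m);         /* the page comes back exclusive-busied */
    VM_OBJECT_WUNLOCK(obj);
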
2015 * Allocate a page in the specified object with the given page index. If the
2023 vm_page_t m; in vm_page_alloc_iter() local
2030 m = vm_page_alloc_domain_iter(object, pindex, domain, req, in vm_page_alloc_iter()
2032 if (m != NULL) in vm_page_alloc_iter()
2036 return (m); in vm_page_alloc_iter()
2051 limit = vmd->vmd_interrupt_free_min; in _vm_domain_allocate()
2053 limit = vmd->vmd_free_reserved; in _vm_domain_allocate()
2059 old = atomic_load_int(&vmd->vmd_free_count); in _vm_domain_allocate()
2063 new = old - npages; in _vm_domain_allocate()
2064 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0); in _vm_domain_allocate()
2068 pagedaemon_wakeup(vmd->vmd_domain); in _vm_domain_allocate()
2071 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) || in _vm_domain_allocate()
2072 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe)) in _vm_domain_allocate()
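
_vm_domain_allocate() reserves pages by atomically lowering the domain's free count, but only while the count stays above a request-class floor; when the fcmpset loses a race it refreshes the observed value and the loop re-validates. The same load/validate/compare-and-swap shape reduced to user-space C11 atomics (all names and numbers below are invented for the illustration):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned int free_count = 1024;

    /*
     * Reserve npages, refusing (with no side effects) if that would push
     * the count below the given floor.  On a lost race the CAS refreshes
     * "old" and the loop re-validates against the new value.
     */
    static bool
    reserve_pages(unsigned int npages, unsigned int floor)
    {
        unsigned int old, new;

        old = atomic_load(&free_count);
        do {
            if (old < npages || old - npages < floor)
                return (false);
            new = old - npages;
        } while (!atomic_compare_exchange_weak(&free_count, &old, new));
        /* A real allocator would check paging thresholds on "new" here,
         * e.g. wake a page daemon the first time a target is crossed. */
        return (true);
    }
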
2097 vm_page_t m; in vm_page_alloc_domain_iter() local
2114 m = NULL; in vm_page_alloc_domain_iter()
2121 m = vm_page_alloc_nofree_domain(domain, req); in vm_page_alloc_domain_iter()
2122 if (m != NULL) in vm_page_alloc_domain_iter()
2127 * Can we allocate the page from a reservation? in vm_page_alloc_domain_iter()
2130 (m = vm_reserv_alloc_page(object, pindex, domain, req, pages)) != in vm_page_alloc_domain_iter()
2136 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) { in vm_page_alloc_domain_iter()
2137 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone, in vm_page_alloc_domain_iter()
2139 if (m != NULL) { in vm_page_alloc_domain_iter()
2149 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0); in vm_page_alloc_domain_iter()
2151 if (m == NULL) { in vm_page_alloc_domain_iter()
2159 if (m == NULL) { in vm_page_alloc_domain_iter()
2170 * At this point we had better have found a good page. in vm_page_alloc_domain_iter()
2173 vm_page_dequeue(m); in vm_page_alloc_domain_iter()
2174 vm_page_alloc_check(m); in vm_page_alloc_domain_iter()
2179 flags |= m->flags & PG_ZERO; in vm_page_alloc_domain_iter()
2184 m->flags = flags; in vm_page_alloc_domain_iter()
2185 m->a.flags = 0; in vm_page_alloc_domain_iter()
2186 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; in vm_page_alloc_domain_iter()
2187 m->pool = VM_FREEPOOL_DEFAULT; in vm_page_alloc_domain_iter()
2189 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; in vm_page_alloc_domain_iter()
2191 m->busy_lock = VPB_SHARERS_WORD(1); in vm_page_alloc_domain_iter()
2193 m->busy_lock = VPB_UNBUSIED; in vm_page_alloc_domain_iter()
2196 m->ref_count = 1; in vm_page_alloc_domain_iter()
2198 m->a.act_count = 0; in vm_page_alloc_domain_iter()
2200 if (vm_page_iter_insert(m, object, pindex, pages)) { in vm_page_alloc_domain_iter()
2203 m->ref_count = 0; in vm_page_alloc_domain_iter()
2205 KASSERT(m->object == NULL, ("page %p has object", m)); in vm_page_alloc_domain_iter()
2206 m->oflags = VPO_UNMANAGED; in vm_page_alloc_domain_iter()
2207 m->busy_lock = VPB_UNBUSIED; in vm_page_alloc_domain_iter()
2209 vm_page_free_toq(m); in vm_page_alloc_domain_iter()
2220 if (object->memattr != VM_MEMATTR_DEFAULT && in vm_page_alloc_domain_iter()
2221 (object->flags & OBJ_FICTITIOUS) == 0) in vm_page_alloc_domain_iter()
2222 pmap_page_set_memattr(m, object->memattr); in vm_page_alloc_domain_iter()
2224 return (m); in vm_page_alloc_domain_iter()
2230 * Allocate a contiguous set of physical pages of the given size "npages"
2235 * non-zero, then the set of physical pages cannot cross any physical
2236 * address boundary that is a multiple of that value. Both "alignment"
2237 * and "boundary" must be a power of two.
2259 * VM_ALLOC_NODUMP do not include the pages in a kernel core dump
2274 vm_page_t m; in vm_page_alloc_contig() local
2278 start_segind = -1; in vm_page_alloc_contig()
2284 m = vm_page_alloc_contig_domain(object, pindex, domain, req, in vm_page_alloc_contig()
2286 if (m != NULL) in vm_page_alloc_contig()
2288 if (start_segind == -1) in vm_page_alloc_contig()
2291 npages, low, high) == -1) { in vm_page_alloc_contig()
2296 return (m); in vm_page_alloc_contig()
2324 * Try to break a reservation to allocate the pages. in vm_page_find_contig_domain()
2343 vm_page_t m, m_ret, mpred; in vm_page_alloc_contig_domain() local
2355 KASSERT((object->flags & OBJ_FICTITIOUS) == 0, in vm_page_alloc_contig_domain()
2364 * Can we allocate the pages from a reservation? in vm_page_alloc_contig_domain()
2386 oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; in vm_page_alloc_contig_domain()
2395 if (object->memattr != VM_MEMATTR_DEFAULT && in vm_page_alloc_contig_domain()
2397 memattr = object->memattr; in vm_page_alloc_contig_domain()
2398 for (m = m_ret; m < &m_ret[npages]; m++) { in vm_page_alloc_contig_domain()
2399 vm_page_dequeue(m); in vm_page_alloc_contig_domain()
2400 vm_page_alloc_check(m); in vm_page_alloc_contig_domain()
2401 m->a.flags = 0; in vm_page_alloc_contig_domain()
2402 m->flags = (m->flags | PG_NODUMP) & flags; in vm_page_alloc_contig_domain()
2403 m->busy_lock = busy_lock; in vm_page_alloc_contig_domain()
2405 m->ref_count = 1; in vm_page_alloc_contig_domain()
2406 m->a.act_count = 0; in vm_page_alloc_contig_domain()
2407 m->oflags = oflags; in vm_page_alloc_contig_domain()
2408 m->pool = VM_FREEPOOL_DEFAULT; in vm_page_alloc_contig_domain()
2409 if (vm_page_iter_insert(m, object, pindex, &pages)) { in vm_page_alloc_contig_domain()
2412 KASSERT(m->object == NULL, in vm_page_alloc_contig_domain()
2413 ("page %p has object", m)); in vm_page_alloc_contig_domain()
2414 mpred = m; in vm_page_alloc_contig_domain()
2415 for (m = m_ret; m < &m_ret[npages]; m++) { in vm_page_alloc_contig_domain()
2416 if (m <= mpred && in vm_page_alloc_contig_domain()
2418 m->ref_count = 0; in vm_page_alloc_contig_domain()
2419 m->oflags = VPO_UNMANAGED; in vm_page_alloc_contig_domain()
2420 m->busy_lock = VPB_UNBUSIED; in vm_page_alloc_contig_domain()
2422 vm_page_free_toq(m); in vm_page_alloc_contig_domain()
2432 pmap_page_set_memattr(m, memattr); in vm_page_alloc_contig_domain()
2439 * Allocate a physical page that is not intended to be inserted into a VM
2446 vm_page_t m; in vm_page_alloc_noobj_domain() local
2459 m = vm_page_alloc_nofree_domain(domain, req); in vm_page_alloc_noobj_domain()
2460 if (m != NULL) in vm_page_alloc_noobj_domain()
2464 if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) { in vm_page_alloc_noobj_domain()
2465 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone, in vm_page_alloc_noobj_domain()
2467 if (m != NULL) { in vm_page_alloc_noobj_domain()
2475 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0); in vm_page_alloc_noobj_domain()
2477 if (m == NULL) { in vm_page_alloc_noobj_domain()
2485 if (m == NULL) { in vm_page_alloc_noobj_domain()
2494 * have a pending deferred dequeue. Specifically, when the page is in vm_page_alloc_noobj_domain()
2495 * imported from a different pool by vm_phys_alloc_npages(), the in vm_page_alloc_noobj_domain()
2496 * second, third, etc. pages in a non-zero order set could have in vm_page_alloc_noobj_domain()
2499 vm_page_dequeue(m); in vm_page_alloc_noobj_domain()
2500 vm_page_alloc_check(m); in vm_page_alloc_noobj_domain()
2503 * Consumers should not rely on a useful default pindex value. in vm_page_alloc_noobj_domain()
2505 m->pindex = 0xdeadc0dedeadc0de; in vm_page_alloc_noobj_domain()
2506 m->flags = (m->flags & PG_ZERO) | flags; in vm_page_alloc_noobj_domain()
2507 m->a.flags = 0; in vm_page_alloc_noobj_domain()
2508 m->oflags = VPO_UNMANAGED; in vm_page_alloc_noobj_domain()
2509 m->pool = VM_FREEPOOL_DIRECT; in vm_page_alloc_noobj_domain()
2510 m->busy_lock = VPB_UNBUSIED; in vm_page_alloc_noobj_domain()
2513 m->ref_count = 1; in vm_page_alloc_noobj_domain()
2516 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) in vm_page_alloc_noobj_domain()
2517 pmap_zero_page(m); in vm_page_alloc_noobj_domain()
2519 return (m); in vm_page_alloc_noobj_domain()
2531 * Allocate a single NOFREE page.
2533 * This routine hands out NOFREE pages from higher-order
2535 * When a NOFREE chunk for a given domain is used up,
2536 * the routine will try to fetch a new one from the freelists
2542 vm_page_t m; in vm_page_alloc_nofree_domain() local
2549 if (TAILQ_EMPTY(&vmd->vmd_nofreeq)) { in vm_page_alloc_nofree_domain()
2557 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, in vm_page_alloc_nofree_domain()
2559 if (m == NULL) { in vm_page_alloc_nofree_domain()
2564 m->ref_count = count - 1; in vm_page_alloc_nofree_domain()
2565 TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q); in vm_page_alloc_nofree_domain()
2568 m = TAILQ_FIRST(&vmd->vmd_nofreeq); in vm_page_alloc_nofree_domain()
2569 TAILQ_REMOVE(&vmd->vmd_nofreeq, m, plinks.q); in vm_page_alloc_nofree_domain()
2570 if (m->ref_count > 0) { in vm_page_alloc_nofree_domain()
2573 m_next = &m[1]; in vm_page_alloc_nofree_domain()
2575 m_next->ref_count = m->ref_count - 1; in vm_page_alloc_nofree_domain()
2576 TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m_next, plinks.q); in vm_page_alloc_nofree_domain()
2577 m->ref_count = 0; in vm_page_alloc_nofree_domain()
2580 atomic_add_long(&nofreeq_size, -1); in vm_page_alloc_nofree_domain()
2583 return (m); in vm_page_alloc_nofree_domain()
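
vm_page_alloc_nofree_domain() above amortizes allocator work by taking one higher-order chunk and carving single pages out of it, recording how many pages remain in the chunk's head entry. A user-space analogue of that carve-out scheme (the chunk size, names, and use of aligned_alloc() are assumptions made for the example):

    #include <stdlib.h>

    #define CHUNK_PAGES 8
    #define PAGE_SIZE   4096

    static char *chunk;     /* chunk currently being carved up */
    static int chunk_left;  /* pages still unused in that chunk */

    /*
     * Hand out one page at a time, refilling from a fresh multi-page
     * chunk only when the current one is exhausted.  Returns NULL only
     * if the refill itself fails.
     */
    static void *
    nofree_page_alloc(void)
    {
        void *p;

        if (chunk_left == 0) {
            chunk = aligned_alloc(PAGE_SIZE, CHUNK_PAGES * PAGE_SIZE);
            if (chunk == NULL)
                return (NULL);
            chunk_left = CHUNK_PAGES;
        }
        p = chunk;
        chunk += PAGE_SIZE;     /* the next call resumes one page later */
        chunk_left--;
        return (p);
    }
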
2587 * Though a NOFREE page by definition should not be freed, we support putting
2593 vm_page_free_nofree(struct vm_domain *vmd, vm_page_t m) in vm_page_free_nofree() argument
2595 VM_CNT_ADD(v_nofree_count, -1); in vm_page_free_nofree()
2598 MPASS(m->ref_count == 0); in vm_page_free_nofree()
2599 TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q); in vm_page_free_nofree()
2607 vm_page_t m; in vm_page_alloc_noobj() local
2614 m = vm_page_alloc_noobj_domain(domain, req); in vm_page_alloc_noobj()
2615 if (m != NULL) in vm_page_alloc_noobj()
2619 return (m); in vm_page_alloc_noobj()
2628 vm_page_t m; in vm_page_alloc_noobj_contig() local
2635 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low, in vm_page_alloc_noobj_contig()
2637 if (m != NULL) in vm_page_alloc_noobj_contig()
2641 return (m); in vm_page_alloc_noobj_contig()
2649 vm_page_t m, m_ret; in vm_page_alloc_noobj_contig_domain() local
2675 for (m = m_ret; m < &m_ret[npages]; m++) { in vm_page_alloc_noobj_contig_domain()
2676 vm_page_dequeue(m); in vm_page_alloc_noobj_contig_domain()
2677 vm_page_alloc_check(m); in vm_page_alloc_noobj_contig_domain()
2680 * Consumers should not rely on a useful default pindex value. in vm_page_alloc_noobj_contig_domain()
2682 m->pindex = 0xdeadc0dedeadc0de; in vm_page_alloc_noobj_contig_domain()
2683 m->a.flags = 0; in vm_page_alloc_noobj_contig_domain()
2684 m->flags = (m->flags | PG_NODUMP) & flags; in vm_page_alloc_noobj_contig_domain()
2685 m->busy_lock = VPB_UNBUSIED; in vm_page_alloc_noobj_contig_domain()
2687 m->ref_count = 1; in vm_page_alloc_noobj_contig_domain()
2688 m->a.act_count = 0; in vm_page_alloc_noobj_contig_domain()
2689 m->oflags = VPO_UNMANAGED; in vm_page_alloc_noobj_contig_domain()
2690 m->pool = VM_FREEPOOL_DIRECT; in vm_page_alloc_noobj_contig_domain()
2695 * non-default memory attribute. pmap_page_set_memattr() in vm_page_alloc_noobj_contig_domain()
2698 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0) in vm_page_alloc_noobj_contig_domain()
2699 pmap_zero_page(m); in vm_page_alloc_noobj_contig_domain()
2701 pmap_page_set_memattr(m, memattr); in vm_page_alloc_noobj_contig_domain()
2707 * Check a page that has been freshly dequeued from a freelist.
2710 vm_page_alloc_check(vm_page_t m) in vm_page_alloc_check() argument
2713 KASSERT(m->object == NULL, ("page %p has object", m)); in vm_page_alloc_check()
2714 KASSERT(m->a.queue == PQ_NONE && in vm_page_alloc_check()
2715 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, in vm_page_alloc_check()
2717 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK))); in vm_page_alloc_check()
2718 KASSERT(m->ref_count == 0, ("page %p has references", m)); in vm_page_alloc_check()
2719 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m)); in vm_page_alloc_check()
2720 KASSERT(m->dirty == 0, ("page %p is dirty", m)); in vm_page_alloc_check()
2721 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, in vm_page_alloc_check()
2723 m, pmap_page_get_memattr(m))); in vm_page_alloc_check()
2724 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m)); in vm_page_alloc_check()
2725 pmap_vm_page_alloc_check(m); in vm_page_alloc_check()
2736 vmd = VM_DOMAIN(pgcache->domain); in vm_page_zone_import()
2742 if (vmd->vmd_severeset || curproc == pageproc || in vm_page_zone_import()
2745 domain = vmd->vmd_domain; in vm_page_zone_import()
2747 i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, in vm_page_zone_import()
2751 vm_domain_freecnt_inc(vmd, cnt - i); in vm_page_zone_import()
2761 vm_page_t m; in vm_page_zone_release() local
2765 vmd = VM_DOMAIN(pgcache->domain); in vm_page_zone_release()
2768 m = (vm_page_t)store[i]; in vm_page_zone_release()
2769 vm_phys_free_pages(m, pgcache->pool, 0); in vm_page_zone_release()
2783 * "m_end" for a run of contiguous physical pages that satisfy the
2786 * page in the run. If the specified "boundary" is non-zero, then the
2787 * run of physical pages cannot span a physical address that is a
2790 * "m_end" is never dereferenced, so it need not point to a vm_page
2794 * span a hole (or discontiguity) in the physical address space. Both
2795 * "alignment" and "boundary" must be a power of two.
2803 vm_page_t m, m_run; in vm_page_scan_contig() local
2810 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); in vm_page_scan_contig()
2811 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); in vm_page_scan_contig()
2814 for (m = m_start; m < m_end && run_len < npages; m += m_inc) { in vm_page_scan_contig()
2815 KASSERT((m->flags & PG_MARKER) == 0, in vm_page_scan_contig()
2816 ("page %p is PG_MARKER", m)); in vm_page_scan_contig()
2817 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1, in vm_page_scan_contig()
2818 ("fictitious page %p has invalid ref count", m)); in vm_page_scan_contig()
2821 * If the current page would be the start of a run, check its in vm_page_scan_contig()
2829 if (m + npages > m_end) in vm_page_scan_contig()
2831 pa = VM_PAGE_TO_PHYS(m); in vm_page_scan_contig()
2833 m_inc = atop(roundup2(pa, alignment) - pa); in vm_page_scan_contig()
2837 m_inc = atop(roundup2(pa, boundary) - pa); in vm_page_scan_contig()
2845 if (vm_page_wired(m)) in vm_page_scan_contig()
2848 else if ((level = vm_reserv_level(m)) >= 0 && in vm_page_scan_contig()
2852 pa = VM_PAGE_TO_PHYS(m); in vm_page_scan_contig()
2853 m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - in vm_page_scan_contig()
2857 else if ((object = atomic_load_ptr(&m->object)) != NULL) { in vm_page_scan_contig()
2864 if (object != m->object) { in vm_page_scan_contig()
2869 if ((object->flags & OBJ_SWAP) == 0 && in vm_page_scan_contig()
2870 object->type != OBJT_VNODE) { in vm_page_scan_contig()
2874 (level = vm_reserv_level_iffullpop(m)) >= 0) { in vm_page_scan_contig()
2877 pa = VM_PAGE_TO_PHYS(m); in vm_page_scan_contig()
2879 vm_reserv_size(level)) - pa); in vm_page_scan_contig()
2881 } else if (object->memattr == VM_MEMATTR_DEFAULT && in vm_page_scan_contig()
2882 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) { in vm_page_scan_contig()
2888 KASSERT(pmap_page_get_memattr(m) == in vm_page_scan_contig()
2890 ("page %p has an unexpected memattr", m)); in vm_page_scan_contig()
2891 KASSERT((m->oflags & (VPO_SWAPINPROG | in vm_page_scan_contig()
2893 ("page %p has unexpected oflags", m)); in vm_page_scan_contig()
2908 } else if ((order = m->order) < VM_NFREEORDER) { in vm_page_scan_contig()
2912 * first page in a power-of-two-sized run of in vm_page_scan_contig()
2923 * page in a run of contiguous free pages. (This case in vm_page_scan_contig()
2940 m_run = m; in vm_page_scan_contig()
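
The m_inc computations in the scan above use roundup2() so the loop can jump directly to the next properly aligned candidate address rather than stepping one page at a time; atop() then converts the byte distance into a page count. A small user-space rendering of that arithmetic (PAGE_SHIFT and the sample values are assumptions for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define atop(x)     ((uintmax_t)(x) >> PAGE_SHIFT)
    /* Round x up to the next multiple of the power-of-two y. */
    #define roundup2(x, y)  (((x) + ((y) - 1)) & ~((uintmax_t)(y) - 1))

    int
    main(void)
    {
        uintmax_t pa = 0x12345000;          /* current page's address */
        uintmax_t alignment = 1 << 21;      /* want 2 MB alignment */

        /* Pages to skip so the next candidate run starts aligned. */
        printf("skip %ju pages\n", atop(roundup2(pa, alignment) - pa));
        return (0);
    }
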
2958 * specified run of physical pages to a new physical address. Free the
2959 * physical pages underlying the relocated virtual pages. A virtual page
2961 * the page daemon. Whenever possible, a virtual page is relocated to a
2965 * just freed by a successful relocation. Otherwise, returns a non-zero
2966 * value indicating why the last attempt to relocate a virtual page was
2979 vm_page_t m, m_end, m_new; in vm_page_reclaim_run() local
2986 m = m_run; in vm_page_reclaim_run()
2988 for (; error == 0 && m < m_end; m++) { in vm_page_reclaim_run()
2989 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, in vm_page_reclaim_run()
2990 ("page %p is PG_FICTITIOUS or PG_MARKER", m)); in vm_page_reclaim_run()
2996 if (vm_page_wired(m)) in vm_page_reclaim_run()
2998 else if ((object = atomic_load_ptr(&m->object)) != NULL) { in vm_page_reclaim_run()
3005 if (m->object != object || in vm_page_reclaim_run()
3006 ((object->flags & OBJ_SWAP) == 0 && in vm_page_reclaim_run()
3007 object->type != OBJT_VNODE)) in vm_page_reclaim_run()
3009 else if (object->memattr != VM_MEMATTR_DEFAULT) in vm_page_reclaim_run()
3011 else if (vm_page_queue(m) != PQ_NONE && in vm_page_reclaim_run()
3012 vm_page_tryxbusy(m) != 0) { in vm_page_reclaim_run()
3013 if (vm_page_wired(m)) { in vm_page_reclaim_run()
3014 vm_page_xunbusy(m); in vm_page_reclaim_run()
3018 KASSERT(pmap_page_get_memattr(m) == in vm_page_reclaim_run()
3020 ("page %p has an unexpected memattr", m)); in vm_page_reclaim_run()
3021 KASSERT(m->oflags == 0, in vm_page_reclaim_run()
3022 ("page %p has unexpected oflags", m)); in vm_page_reclaim_run()
3024 if (!vm_page_none_valid(m)) { in vm_page_reclaim_run()
3026 * First, try to allocate a new page in vm_page_reclaim_run()
3028 * that, try to allocate a new page in vm_page_reclaim_run()
3031 * "m_run" and "high" only as a last in vm_page_reclaim_run()
3035 if ((m->flags & PG_NODUMP) != 0) in vm_page_reclaim_run()
3050 req, 1, 0, pa - 1, in vm_page_reclaim_run()
3062 vm_page_xunbusy(m); in vm_page_reclaim_run()
3070 * through a pmap lookup. in vm_page_reclaim_run()
3072 if (object->ref_count != 0 && in vm_page_reclaim_run()
3073 !vm_page_try_remove_all(m)) { in vm_page_reclaim_run()
3074 vm_page_xunbusy(m); in vm_page_reclaim_run()
3081 * Replace "m" with the new page. For in vm_page_reclaim_run()
3082 * vm_page_replace(), "m" must be busy in vm_page_reclaim_run()
3083 * and dequeued. Finally, change "m" in vm_page_reclaim_run()
3086 m_new->a.flags = m->a.flags & in vm_page_reclaim_run()
3088 KASSERT(m_new->oflags == VPO_UNMANAGED, in vm_page_reclaim_run()
3090 m_new->oflags = 0; in vm_page_reclaim_run()
3091 pmap_copy_page(m, m_new); in vm_page_reclaim_run()
3092 m_new->valid = m->valid; in vm_page_reclaim_run()
3093 m_new->dirty = m->dirty; in vm_page_reclaim_run()
3094 m->flags &= ~PG_ZERO; in vm_page_reclaim_run()
3095 vm_page_dequeue(m); in vm_page_reclaim_run()
3097 m->pindex, m) && in vm_page_reclaim_run()
3098 vm_page_free_prep(m)) in vm_page_reclaim_run()
3099 SLIST_INSERT_HEAD(&free, m, in vm_page_reclaim_run()
3108 m->flags &= ~PG_ZERO; in vm_page_reclaim_run()
3109 vm_page_dequeue(m); in vm_page_reclaim_run()
3110 if (vm_page_free_prep(m)) in vm_page_reclaim_run()
3111 SLIST_INSERT_HEAD(&free, m, in vm_page_reclaim_run()
3113 KASSERT(m->dirty == 0, in vm_page_reclaim_run()
3114 ("page %p is dirty", m)); in vm_page_reclaim_run()
3121 MPASS(vm_page_domain(m) == domain); in vm_page_reclaim_run()
3124 order = m->order; in vm_page_reclaim_run()
3129 * is the first page in a power-of-two-sized in vm_page_reclaim_run()
3134 m += (1 << order) - 1; in vm_page_reclaim_run()
3137 else if (vm_reserv_is_page_free(m)) in vm_page_reclaim_run()
3145 if ((m = SLIST_FIRST(&free)) != NULL) { in vm_page_reclaim_run()
3152 MPASS(vm_page_domain(m) == domain); in vm_page_reclaim_run()
3154 vm_phys_free_pages(m, m->pool, 0); in vm_page_reclaim_run()
3156 } while ((m = SLIST_FIRST(&free)) != NULL); in vm_page_reclaim_run()
3178 * fail with ENOMEM due to a shortage of free pages. When reclamation
3180 * retrying a failed allocation operation, e.g., vm_page_alloc_contig().
3186 * VM_ALLOC_SYSTEM system *really* needs a page
3192 * must be a power of two.
3207 KASSERT(powerof2(alignment), ("alignment is not a power of 2")); in vm_page_reclaim_contig_domain_ext()
3208 KASSERT(powerof2(boundary), ("boundary is not a power of 2")); in vm_page_reclaim_contig_domain_ext()
3227 nruns = NRUNS + desired_runs - 1; in vm_page_reclaim_contig_domain_ext()
3238 minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1); in vm_page_reclaim_contig_domain_ext()
3257 count = vmd->vmd_free_count; in vm_page_reclaim_contig_domain_ext()
3258 if (count < npages + vmd->vmd_free_reserved || (count < npages + in vm_page_reclaim_contig_domain_ext()
3259 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || in vm_page_reclaim_contig_domain_ext()
3277 npages, low, high)) != -1) { in vm_page_reclaim_contig_domain_ext()
3302 count--; in vm_page_reclaim_contig_domain_ext()
3380 if (!vmd->vmd_minset && vm_paging_min(vmd)) { in vm_domain_set()
3381 vmd->vmd_minset = 1; in vm_domain_set()
3382 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains); in vm_domain_set()
3384 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) { in vm_domain_set()
3385 vmd->vmd_severeset = 1; in vm_domain_set()
3386 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains); in vm_domain_set()
3399 if (vmd->vmd_minset && !vm_paging_min(vmd)) { in vm_domain_clear()
3400 vmd->vmd_minset = 0; in vm_domain_clear()
3401 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains); in vm_domain_clear()
3407 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) { in vm_domain_clear()
3408 vmd->vmd_severeset = 0; in vm_domain_clear()
3409 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains); in vm_domain_clear()
3420 if (vmd->vmd_pageout_pages_needed && in vm_domain_clear()
3421 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) { in vm_domain_clear()
3422 wakeup(&vmd->vmd_pageout_pages_needed); in vm_domain_clear()
3423 vmd->vmd_pageout_pages_needed = 0; in vm_domain_clear()
3481 * locking for the pageproc when sleeping with a non-specific vm_wait. in vm_wait_doms()
3485 * race-free vm_wait_domain(). in vm_wait_doms()
3515 * - Called in various places after failed memory allocations.
3528 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { in vm_wait_domain()
3529 vmd->vmd_pageout_pages_needed = 1; in vm_wait_domain()
3530 msleep(&vmd->vmd_pageout_pages_needed, in vm_wait_domain()
3536 DOMAINSET_SET(vmd->vmd_domain, &wdom); in vm_wait_domain()
3553 d = obj->domain.dr_policy; in vm_wait_flags()
3555 d = curthread->td_domain.dr_policy; in vm_wait_flags()
3557 return (vm_wait_doms(&d->ds_mask, mflags)); in vm_wait_flags()
3583 * Called when a page allocation function fails. Informs the
3596 atomic_add_int(&vmd->vmd_pageout_deficit, in vm_domain_alloc_fail()
3601 vm_wait_domain(vmd->vmd_domain); in vm_domain_alloc_fail()
3615 * - Called only in vm_fault so that processes page faulting
3617 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3631 if (vm_page_count_min_set(&dset->ds_mask)) { in vm_waitpfault()
3640 _vm_page_pagequeue(vm_page_t m, uint8_t queue) in _vm_page_pagequeue() argument
3643 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]); in _vm_page_pagequeue()
3648 vm_page_pagequeue(vm_page_t m) in vm_page_pagequeue() argument
3651 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue)); in vm_page_pagequeue()
3656 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, in vm_page_pqstate_fcmpset() argument
3663 if (__predict_true(vm_page_astate_fcmpset(m, old, new))) in vm_page_pqstate_fcmpset()
3666 } while (old->_bits == tmp._bits); in vm_page_pqstate_fcmpset()
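
vm_page_pqstate_fcmpset() works because the page's queue index and queue-state flags live in a single 32-bit word, so one compare-and-set either applies the entire transition or hands back the conflicting value that won the race. Below is a compact user-space model of such a packed state word; the field layout, masks, and helper are invented and do not match the kernel's encoding.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* One 32-bit word: low byte holds the queue index, the rest is flags. */
    #define STATE_QUEUE_MASK    0x000000ffu
    #define STATE_FLAGS_MASK    0xffffff00u
    #define FLAG_ENQUEUED       0x00000100u

    static _Atomic uint32_t page_state;

    /*
     * Try to move the state from the value the caller observed (*old) to
     * queue "newq" with FLAG_ENQUEUED cleared.  On failure *old is
     * refreshed with the current word so the caller can re-validate.
     */
    static bool
    state_commit_dequeue(uint32_t *old, uint32_t newq)
    {
        uint32_t new;

        new = (*old & STATE_FLAGS_MASK & ~FLAG_ENQUEUED) |
            (newq & STATE_QUEUE_MASK);
        return (atomic_compare_exchange_strong(&page_state, old, new));
    }

A caller loads the word once, validates the transition it wants, and simply calls the helper again with the refreshed value whenever it returns false.
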
3672 * Do the work of committing a queue state update that moves the page out of
3676 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m, in _vm_page_pqstate_commit_dequeue() argument
3682 KASSERT(vm_page_pagequeue(m) == pq, in _vm_page_pqstate_commit_dequeue()
3683 ("%s: queue %p does not match page %p", __func__, pq, m)); in _vm_page_pqstate_commit_dequeue()
3684 KASSERT(old->queue != PQ_NONE && new.queue != old->queue, in _vm_page_pqstate_commit_dequeue()
3686 __func__, old->queue, new.queue)); in _vm_page_pqstate_commit_dequeue()
3696 if ((old->flags & PGA_ENQUEUED) != 0) { in _vm_page_pqstate_commit_dequeue()
3698 next = TAILQ_NEXT(m, plinks.q); in _vm_page_pqstate_commit_dequeue()
3699 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); in _vm_page_pqstate_commit_dequeue()
3701 if (!vm_page_pqstate_fcmpset(m, old, new)) { in _vm_page_pqstate_commit_dequeue()
3703 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); in _vm_page_pqstate_commit_dequeue()
3705 TAILQ_INSERT_BEFORE(next, m, plinks.q); in _vm_page_pqstate_commit_dequeue()
3712 return (vm_page_pqstate_fcmpset(m, old, new)); in _vm_page_pqstate_commit_dequeue()
3717 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old, in vm_page_pqstate_commit_dequeue() argument
3724 pq = _vm_page_pagequeue(m, old->queue); in vm_page_pqstate_commit_dequeue()
3731 as = vm_page_astate_load(m); in vm_page_pqstate_commit_dequeue()
3732 if (__predict_false(as._bits != old->_bits)) { in vm_page_pqstate_commit_dequeue()
3736 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new); in vm_page_pqstate_commit_dequeue()
3743 * Commit a queue state update that enqueues or requeues a page.
3746 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m, in _vm_page_pqstate_commit_requeue() argument
3752 KASSERT(old->queue != PQ_NONE && new.queue == old->queue, in _vm_page_pqstate_commit_requeue()
3754 __func__, old->queue, new.queue)); in _vm_page_pqstate_commit_requeue()
3757 if (!vm_page_pqstate_fcmpset(m, old, new)) in _vm_page_pqstate_commit_requeue()
3760 if ((old->flags & PGA_ENQUEUED) != 0) in _vm_page_pqstate_commit_requeue()
3761 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); in _vm_page_pqstate_commit_requeue()
3770 if ((old->flags & PGA_REQUEUE_HEAD) != 0) { in _vm_page_pqstate_commit_requeue()
3771 vmd = vm_pagequeue_domain(m); in _vm_page_pqstate_commit_requeue()
3772 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE], in _vm_page_pqstate_commit_requeue()
3773 ("%s: invalid page queue for page %p", __func__, m)); in _vm_page_pqstate_commit_requeue()
3774 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q); in _vm_page_pqstate_commit_requeue()
3776 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); in _vm_page_pqstate_commit_requeue()
3782 * Commit a queue state update that encodes a request for a deferred queue
3786 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old, in vm_page_pqstate_commit_request() argument
3790 KASSERT(old->queue == new.queue || new.queue != PQ_NONE, in vm_page_pqstate_commit_request()
3794 if (old->_bits != new._bits && in vm_page_pqstate_commit_request()
3795 !vm_page_pqstate_fcmpset(m, old, new)) in vm_page_pqstate_commit_request()
3797 vm_page_pqbatch_submit(m, new.queue); in vm_page_pqstate_commit_request()
3802 * A generic queue state update function. This handles more cases than the
3806 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new) in vm_page_pqstate_commit() argument
3809 if (old->_bits == new._bits) in vm_page_pqstate_commit()
3812 if (old->queue != PQ_NONE && new.queue != old->queue) { in vm_page_pqstate_commit()
3813 if (!vm_page_pqstate_commit_dequeue(m, old, new)) in vm_page_pqstate_commit()
3816 vm_page_pqbatch_submit(m, new.queue); in vm_page_pqstate_commit()
3818 if (!vm_page_pqstate_fcmpset(m, old, new)) in vm_page_pqstate_commit()
3821 ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0) in vm_page_pqstate_commit()
3822 vm_page_pqbatch_submit(m, new.queue); in vm_page_pqstate_commit()
3828 * Apply deferred queue state updates to a page.
3831 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue) in vm_pqbatch_process_page() argument
3839 KASSERT(pq == _vm_page_pagequeue(m, queue), in vm_pqbatch_process_page()
3840 ("%s: page %p does not belong to queue %p", __func__, m, pq)); in vm_pqbatch_process_page()
3842 for (old = vm_page_astate_load(m);;) { in vm_pqbatch_process_page()
3848 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_pqbatch_process_page()
3849 ("%s: page %p is unmanaged", __func__, m)); in vm_pqbatch_process_page()
3856 m, &old, new))) { in vm_pqbatch_process_page()
3863 m, &old, new))) { in vm_pqbatch_process_page()
3877 for (i = 0; i < bq->bq_cnt; i++) in vm_pqbatch_process()
3878 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue); in vm_pqbatch_process()
3885 * Enqueue a page in the specified page queue's batched work queue.
3887 * structure's a.flags field.
3890 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue) in vm_page_pqbatch_submit() argument
3898 domain = vm_page_domain(m); in vm_page_pqbatch_submit()
3901 slots_remaining = vm_batchqueue_insert(bq, m); in vm_page_pqbatch_submit()
3908 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; in vm_page_pqbatch_submit()
3920 pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue]; in vm_page_pqbatch_submit()
3925 vm_pqbatch_process_page(pq, m, queue); in vm_page_pqbatch_submit()
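
vm_page_pqbatch_submit() defers queue manipulation: updates are stashed in a small per-CPU batch and the page-queue lock is taken only when the batch fills, so one lock acquisition covers many pages. A miniature user-space version of that batching idea, one batch per thread instead of per CPU (the sizes and names are made up for the sketch):

    #include <pthread.h>
    #include <stddef.h>

    #define BATCH_SIZE 8

    struct batch {
        void *item[BATCH_SIZE];
        int cnt;
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Thread_local struct batch batch;    /* per-thread, like per-CPU */

    /* Stand-in for the real per-item queue-state update done under the lock. */
    static void
    process_one(void *item)
    {
        (void)item;
    }

    /*
     * Record one deferred update; only when the batch fills up is the
     * shared lock taken, so one acquisition covers BATCH_SIZE updates.
     */
    static void
    batch_submit(void *item)
    {
        batch.item[batch.cnt++] = item;
        if (batch.cnt < BATCH_SIZE)
            return;

        pthread_mutex_lock(&queue_lock);
        for (int i = 0; i < batch.cnt; i++)
            process_one(batch.item[i]);
        batch.cnt = 0;
        pthread_mutex_unlock(&queue_lock);
    }
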
3933 * Force all per-CPU page queue batch queues to be drained. This is
3954 pq = &vmd->vmd_pagequeues[queue]; in vm_page_pqbatch_drain()
3977 vm_page_dequeue_deferred(vm_page_t m) in vm_page_dequeue_deferred() argument
3981 old = vm_page_astate_load(m); in vm_page_dequeue_deferred()
3986 __func__, m)); in vm_page_dequeue_deferred()
3991 } while (!vm_page_pqstate_commit_request(m, &old, new)); in vm_page_dequeue_deferred()
4001 vm_page_dequeue(vm_page_t m) in vm_page_dequeue() argument
4005 old = vm_page_astate_load(m); in vm_page_dequeue()
4010 __func__, m)); in vm_page_dequeue()
4016 } while (!vm_page_pqstate_commit_dequeue(m, &old, new)); in vm_page_dequeue()
4025 vm_page_enqueue(vm_page_t m, uint8_t queue) in vm_page_enqueue() argument
4028 KASSERT(m->a.queue == PQ_NONE && in vm_page_enqueue()
4029 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0, in vm_page_enqueue()
4030 ("%s: page %p is already enqueued", __func__, m)); in vm_page_enqueue()
4031 KASSERT(m->ref_count > 0, in vm_page_enqueue()
4032 ("%s: page %p does not carry any references", __func__, m)); in vm_page_enqueue()
4034 m->a.queue = queue; in vm_page_enqueue()
4035 if ((m->a.flags & PGA_REQUEUE) == 0) in vm_page_enqueue()
4036 vm_page_aflag_set(m, PGA_REQUEUE); in vm_page_enqueue()
4037 vm_page_pqbatch_submit(m, queue); in vm_page_enqueue()
4048 * be xbusy. Otherwise the page must not be busied. A managed in vm_page_free_prep()
4052 vm_page_free_prep(vm_page_t m) in vm_page_free_prep() argument
4056 * Synchronize with threads that have dropped a reference to this in vm_page_free_prep()
4062 if (vm_check_pg_zero && (m->flags & PG_ZERO) != 0) { in vm_page_free_prep()
4068 sf = sf_buf_alloc(m, SFB_CPUPRIVATE | SFB_NOWAIT); in vm_page_free_prep()
4074 m, i, (uintmax_t)*p)); in vm_page_free_prep()
4081 if ((m->oflags & VPO_UNMANAGED) == 0) { in vm_page_free_prep()
4082 KASSERT(!pmap_page_is_mapped(m), in vm_page_free_prep()
4083 ("vm_page_free_prep: freeing mapped page %p", m)); in vm_page_free_prep()
4084 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0, in vm_page_free_prep()
4085 ("vm_page_free_prep: mapping flags set in page %p", m)); in vm_page_free_prep()
4087 KASSERT(m->a.queue == PQ_NONE, in vm_page_free_prep()
4088 ("vm_page_free_prep: unmanaged page %p is queued", m)); in vm_page_free_prep()
4092 if (m->object != NULL) { in vm_page_free_prep()
4093 vm_page_radix_remove(m); in vm_page_free_prep()
4094 vm_page_free_object_prep(m); in vm_page_free_prep()
4096 vm_page_assert_unbusied(m); in vm_page_free_prep()
4098 vm_page_busy_free(m); in vm_page_free_prep()
4104 if ((m->flags & PG_FICTITIOUS) != 0) { in vm_page_free_prep()
4105 KASSERT(m->ref_count == 1, in vm_page_free_prep()
4106 ("fictitious page %p is referenced", m)); in vm_page_free_prep()
4107 KASSERT(m->a.queue == PQ_NONE, in vm_page_free_prep()
4108 ("fictitious page %p is queued", m)); in vm_page_free_prep()
4114 * memory allocator, but they must at least be marked for a deferred in vm_page_free_prep()
4117 if ((m->oflags & VPO_UNMANAGED) == 0) in vm_page_free_prep()
4118 vm_page_dequeue_deferred(m); in vm_page_free_prep()
4120 m->valid = 0; in vm_page_free_prep()
4121 vm_page_undirty(m); in vm_page_free_prep()
4123 if (m->ref_count != 0) in vm_page_free_prep()
4124 panic("vm_page_free_prep: page %p has references", m); in vm_page_free_prep()
4129 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) in vm_page_free_prep()
4130 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); in vm_page_free_prep()
4134 * Determine whether the page belongs to a reservation. If the page was in vm_page_free_prep()
4135 * allocated from a per-CPU cache, it cannot belong to a reservation, so in vm_page_free_prep()
4138 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m)) in vm_page_free_prep()
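/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  The
 * vm_check_pg_zero block near the top of vm_page_free_prep() maps a page
 * being freed with PG_ZERO set and asserts that every word really is
 * zero, catching callers that dirtied a page without clearing the flag.
 * A userspace analogue of the scan itself:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void
verify_zero_filled(const void *buf, size_t len)
{
        const uint64_t *p;
        size_t i;

        p = buf;
        for (i = 0; i < len / sizeof(*p); i++)
                /* Mirrors the KASSERT: a PG_ZERO page must hold no data. */
                assert(p[i] == 0);
}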
4155 vm_page_free_toq(vm_page_t m) in vm_page_free_toq() argument
4160 if (!vm_page_free_prep(m)) in vm_page_free_toq()
4163 vmd = vm_pagequeue_domain(m); in vm_page_free_toq()
4164 if (__predict_false((m->flags & PG_NOFREE) != 0)) { in vm_page_free_toq()
4165 vm_page_free_nofree(vmd, m); in vm_page_free_toq()
4168 zone = vmd->vmd_pgcache[m->pool].zone; in vm_page_free_toq()
4169 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) { in vm_page_free_toq()
4170 uma_zfree(zone, m); in vm_page_free_toq()
4174 vm_phys_free_pages(m, m->pool, 0); in vm_page_free_toq()
4182 * Returns a list of pages to the free list, disassociating it
4184 * calling vm_page_free_toq() for each page of a list of VM objects.
4189 vm_page_t m; in vm_page_free_pages_toq() local
4196 while ((m = SLIST_FIRST(free)) != NULL) { in vm_page_free_pages_toq()
4199 vm_page_free_toq(m); in vm_page_free_pages_toq()
4212 vm_page_wire(vm_page_t m) in vm_page_wire() argument
4217 if (m->object != NULL && !vm_page_busied(m) && in vm_page_wire()
4218 !vm_object_busied(m->object)) in vm_page_wire()
4219 VM_OBJECT_ASSERT_LOCKED(m->object); in vm_page_wire()
4221 KASSERT((m->flags & PG_FICTITIOUS) == 0 || in vm_page_wire()
4222 VPRC_WIRE_COUNT(m->ref_count) >= 1, in vm_page_wire()
4223 ("vm_page_wire: fictitious page %p has zero wirings", m)); in vm_page_wire()
4225 old = atomic_fetchadd_int(&m->ref_count, 1); in vm_page_wire()
4227 ("vm_page_wire: counter overflow for page %p", m)); in vm_page_wire()
4229 if ((m->oflags & VPO_UNMANAGED) == 0) in vm_page_wire()
4230 vm_page_aflag_set(m, PGA_DEQUEUE); in vm_page_wire()
4236 * Attempt to wire a mapped page following a pmap lookup of that page.
4237 * This may fail if a thread is concurrently tearing down mappings of the page.
4243 vm_page_wire_mapped(vm_page_t m) in vm_page_wire_mapped() argument
4247 old = atomic_load_int(&m->ref_count); in vm_page_wire_mapped()
4250 ("vm_page_wire_mapped: wiring unreferenced page %p", m)); in vm_page_wire_mapped()
4253 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1)); in vm_page_wire_mapped()
4256 if ((m->oflags & VPO_UNMANAGED) == 0) in vm_page_wire_mapped()
4257 vm_page_aflag_set(m, PGA_DEQUEUE); in vm_page_wire_mapped()
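/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  The loop in
 * vm_page_wire_mapped() above only takes a new wiring when the blocked
 * marker is clear, so a thread tearing down mappings can rely on no new
 * wirings appearing once it has set that marker.  A userspace model (the
 * bit value is a stand-in, not the kernel's exact VPRC encoding):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define REFM_BLOCKED    0x40000000u     /* models VPRC_BLOCKED */

static bool
ref_wire_if_unblocked(_Atomic unsigned int *refs)
{
        unsigned int old;

        old = atomic_load(refs);
        do {
                if ((old & REFM_BLOCKED) != 0)
                        return (false); /* mappings being torn down */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));
        return (true);
}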
4264 * Release a wiring reference to a managed page. If the page still belongs to
4269 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse) in vm_page_unwire_managed() argument
4273 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_page_unwire_managed()
4274 ("%s: page %p is unmanaged", __func__, m)); in vm_page_unwire_managed()
4278 * Use a release store when updating the reference count to in vm_page_unwire_managed()
4281 old = atomic_load_int(&m->ref_count); in vm_page_unwire_managed()
4286 ("vm_page_unwire: wire count underflow for page %p", m)); in vm_page_unwire_managed()
4294 * re-set it if necessary. in vm_page_unwire_managed()
4296 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0) in vm_page_unwire_managed()
4297 vm_page_aflag_set(m, PGA_DEQUEUE); in vm_page_unwire_managed()
4306 vm_page_release_toq(m, nqueue, noreuse); in vm_page_unwire_managed()
4308 vm_page_aflag_clear(m, PGA_DEQUEUE); in vm_page_unwire_managed()
4310 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1)); in vm_page_unwire_managed()
4315 vm_page_free(m); in vm_page_unwire_managed()
4329 vm_page_unwire(vm_page_t m, uint8_t nqueue) in vm_page_unwire() argument
4334 nqueue, m)); in vm_page_unwire()
4336 if ((m->oflags & VPO_UNMANAGED) != 0) { in vm_page_unwire()
4337 if (vm_page_unwire_noq(m) && m->ref_count == 0) in vm_page_unwire()
4338 vm_page_free(m); in vm_page_unwire()
4341 vm_page_unwire_managed(m, nqueue, false); in vm_page_unwire()
4345 * Unwire a page without (re-)inserting it into a page queue. It is up
4351 vm_page_unwire_noq(vm_page_t m) in vm_page_unwire_noq() argument
4355 old = vm_page_drop(m, 1); in vm_page_unwire_noq()
4357 ("%s: counter underflow for page %p", __func__, m)); in vm_page_unwire_noq()
4358 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1, in vm_page_unwire_noq()
4359 ("%s: missing ref on fictitious page %p", __func__, m)); in vm_page_unwire_noq()
4363 if ((m->oflags & VPO_UNMANAGED) == 0) in vm_page_unwire_noq()
4364 vm_page_aflag_clear(m, PGA_DEQUEUE); in vm_page_unwire_noq()
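/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  The unwire
 * paths above drop their reference with release semantics so that all
 * prior stores to the page are visible before another thread can observe
 * the count reaching zero and free it.  A minimal model of that
 * drop-and-free-on-zero convention:
 */
#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the caller dropped the last reference and must free. */
static bool
ref_drop(_Atomic unsigned int *refs)
{
        unsigned int old;

        old = atomic_fetch_sub_explicit(refs, 1, memory_order_release);
        if (old != 1)
                return (false);
        /* Pair with the other droppers' releases before tearing down. */
        atomic_thread_fence(memory_order_acquire);
        return (true);
}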
4375 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag) in vm_page_mvqueue() argument
4379 KASSERT(m->ref_count > 0, in vm_page_mvqueue()
4380 ("%s: page %p does not carry any references", __func__, m)); in vm_page_mvqueue()
4384 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m)) in vm_page_mvqueue()
4387 old = vm_page_astate_load(m); in vm_page_mvqueue()
4407 } while (!vm_page_pqstate_commit(m, &old, new)); in vm_page_mvqueue()
4414 vm_page_activate(vm_page_t m) in vm_page_activate() argument
4417 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE); in vm_page_activate()
4425 vm_page_deactivate(vm_page_t m) in vm_page_deactivate() argument
4428 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE); in vm_page_deactivate()
4432 vm_page_deactivate_noreuse(vm_page_t m) in vm_page_deactivate_noreuse() argument
4435 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD); in vm_page_deactivate_noreuse()
4439 * Put a page in the laundry, or requeue it if it is already there.
4442 vm_page_launder(vm_page_t m) in vm_page_launder() argument
4445 vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE); in vm_page_launder()
4449 * Put a page in the PQ_UNSWAPPABLE holding queue.
4452 vm_page_unswappable(vm_page_t m) in vm_page_unswappable() argument
4455 VM_OBJECT_ASSERT_LOCKED(m->object); in vm_page_unswappable()
4456 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_page_unswappable()
4457 ("page %p already unswappable", m)); in vm_page_unswappable()
4459 vm_page_dequeue(m); in vm_page_unswappable()
4460 vm_page_enqueue(m, PQ_UNSWAPPABLE); in vm_page_unswappable()
4464 * Release a page back to the page queues in preparation for unwiring.
4467 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse) in vm_page_release_toq() argument
4473 * Use a check of the valid bits to determine whether we should in vm_page_release_toq()
4476 * accelerate reclamation of a valid page and violate LRU, or in vm_page_release_toq()
4482 if (noreuse || vm_page_none_valid(m)) { in vm_page_release_toq()
4489 old = vm_page_astate_load(m); in vm_page_release_toq()
4506 } while (!vm_page_pqstate_commit(m, &old, new)); in vm_page_release_toq()
4510 * Unwire a page and either attempt to free it or re-add it to the page queues.
4513 vm_page_release(vm_page_t m, int flags) in vm_page_release() argument
4517 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_page_release()
4518 ("vm_page_release: page %p is unmanaged", m)); in vm_page_release()
4522 object = atomic_load_ptr(&m->object); in vm_page_release()
4525 /* Depends on type-stability. */ in vm_page_release()
4526 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object)) in vm_page_release()
4528 if (object == m->object) { in vm_page_release()
4529 vm_page_release_locked(m, flags); in vm_page_release()
4536 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0); in vm_page_release()
4541 vm_page_release_locked(vm_page_t m, int flags) in vm_page_release_locked() argument
4544 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_page_release_locked()
4545 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_page_release_locked()
4546 ("vm_page_release_locked: page %p is unmanaged", m)); in vm_page_release_locked()
4548 if (vm_page_unwire_noq(m)) { in vm_page_release_locked()
4550 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && in vm_page_release_locked()
4551 m->dirty == 0 && vm_page_tryxbusy(m)) { in vm_page_release_locked()
4557 if (__predict_true(!vm_page_wired(m))) { in vm_page_release_locked()
4558 vm_page_free(m); in vm_page_release_locked()
4561 vm_page_xunbusy(m); in vm_page_release_locked()
4563 vm_page_release_toq(m, PQ_INACTIVE, flags != 0); in vm_page_release_locked()
4569 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t)) in vm_page_try_blocked_op() argument
4573 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0, in vm_page_try_blocked_op()
4574 ("vm_page_try_blocked_op: page %p has no object", m)); in vm_page_try_blocked_op()
4575 KASSERT(vm_page_busied(m), in vm_page_try_blocked_op()
4576 ("vm_page_try_blocked_op: page %p is not busy", m)); in vm_page_try_blocked_op()
4577 VM_OBJECT_ASSERT_LOCKED(m->object); in vm_page_try_blocked_op()
4579 old = atomic_load_int(&m->ref_count); in vm_page_try_blocked_op()
4582 ("vm_page_try_blocked_op: page %p has no references", m)); in vm_page_try_blocked_op()
4584 ("vm_page_try_blocked_op: page %p blocks wirings", m)); in vm_page_try_blocked_op()
4587 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED)); in vm_page_try_blocked_op()
4589 (op)(m); in vm_page_try_blocked_op()
4592 * If the object is read-locked, new wirings may be created via an in vm_page_try_blocked_op()
4595 old = vm_page_drop(m, VPRC_BLOCKED); in vm_page_try_blocked_op()
4596 KASSERT(!VM_OBJECT_WOWNED(m->object) || in vm_page_try_blocked_op()
4599 old, m)); in vm_page_try_blocked_op()
4607 vm_page_try_remove_all(vm_page_t m) in vm_page_try_remove_all() argument
4610 return (vm_page_try_blocked_op(m, pmap_remove_all)); in vm_page_try_remove_all()
4617 vm_page_try_remove_write(vm_page_t m) in vm_page_try_remove_write() argument
4620 return (vm_page_try_blocked_op(m, pmap_remove_write)); in vm_page_try_remove_write()
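/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  The helper
 * above publishes a "blocked" marker in the reference count when the page
 * has no wirings, runs the pmap operation, and then withdraws the marker;
 * together with the check in the wiring path, the operation cannot race
 * with a new wiring.  A userspace model (the bit layout is a stand-in for
 * the kernel's VPRC encoding):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define REFB_BLOCKED    0x40000000u             /* models VPRC_BLOCKED */
#define REFB_WIRES(c)   ((c) & 0x3fffffffu)     /* models VPRC_WIRE_COUNT() */

static bool
try_blocked_op(_Atomic unsigned int *refs, void (*op)(void *), void *arg)
{
        unsigned int old;

        old = atomic_load(refs);
        do {
                if (REFB_WIRES(old) != 0)
                        return (false); /* wired: caller must back off */
        } while (!atomic_compare_exchange_weak(refs, &old,
            old | REFB_BLOCKED));

        op(arg);        /* e.g. remove all mappings of the page */

        /* Withdraw the marker; new wirings may be created again. */
        atomic_fetch_and(refs, ~REFB_BLOCKED);
        return (true);
}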
4629 vm_page_advise(vm_page_t m, int advice) in vm_page_advise() argument
4632 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_page_advise()
4633 vm_page_assert_xbusied(m); in vm_page_advise()
4640 * would result in a page fault on a later access. in vm_page_advise()
4642 vm_page_undirty(m); in vm_page_advise()
4645 vm_page_activate(m); in vm_page_advise()
4649 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) in vm_page_advise()
4650 vm_page_dirty(m); in vm_page_advise()
4656 vm_page_aflag_clear(m, PGA_REFERENCED); in vm_page_advise()
4664 if (m->dirty == 0) in vm_page_advise()
4665 vm_page_deactivate_noreuse(m); in vm_page_advise()
4666 else if (!vm_page_in_laundry(m)) in vm_page_advise()
4667 vm_page_launder(m); in vm_page_advise()
4676 vm_page_grab_release(vm_page_t m, int allocflags) in vm_page_grab_release() argument
4681 vm_page_sunbusy(m); in vm_page_grab_release()
4683 vm_page_xunbusy(m); in vm_page_grab_release()
4698 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex, in vm_page_grab_sleep() argument
4710 vm_page_reference(m); in vm_page_grab_sleep()
4712 if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) && in vm_page_grab_sleep()
4757 * Grab a page, waiting until we are woken up due to the page changing state.
4764 * Return a grabbed page, or NULL. Set *found if a page was found, whether or
4771 vm_page_t m; in vm_page_grab_lookup() local
4773 while ((*found = (m = vm_radix_iter_lookup(pages, pindex)) != NULL) && in vm_page_grab_lookup()
4774 !vm_page_tryacquire(m, allocflags)) { in vm_page_grab_lookup()
4775 if (!vm_page_grab_sleep(object, m, pindex, "pgrbwt", in vm_page_grab_lookup()
4780 return (m); in vm_page_grab_lookup()
4784 * Grab a page. Use an iterator parameter. Keep on waiting, as long as the page
4795 vm_page_t m; in vm_page_grab_iter() local
4801 while ((m = vm_page_grab_lookup( in vm_page_grab_iter()
4808 m = vm_page_alloc_iter(object, pindex, in vm_page_grab_iter()
4810 if (m != NULL) { in vm_page_grab_iter()
4812 (m->flags & PG_ZERO) == 0) in vm_page_grab_iter()
4813 pmap_zero_page(m); in vm_page_grab_iter()
4820 vm_page_grab_release(m, allocflags); in vm_page_grab_iter()
4822 return (m); in vm_page_grab_iter()
4826 * Grab a page. Keep on waiting, as long as the page exists in the object. If
4843 * Attempt to validate a page, locklessly acquiring it if necessary, given a
4846 * requested. A NULL page returned guarantees that the page was not in radix at
4848 * retry the operation under a lock if they require an atomic answer. This is
4857 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m, in vm_page_acquire_unlocked() argument
4860 if (m == NULL) in vm_page_acquire_unlocked()
4861 m = vm_page_lookup_unlocked(object, pindex); in vm_page_acquire_unlocked()
4862 for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) { in vm_page_acquire_unlocked()
4863 if (vm_page_trybusy(m, allocflags)) { in vm_page_acquire_unlocked()
4864 if (m->object == object && m->pindex == pindex) { in vm_page_acquire_unlocked()
4866 vm_page_wire(m); in vm_page_acquire_unlocked()
4867 vm_page_grab_release(m, allocflags); in vm_page_acquire_unlocked()
4871 vm_page_busy_release(m); in vm_page_acquire_unlocked()
4875 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp", in vm_page_acquire_unlocked()
4879 return (m); in vm_page_acquire_unlocked()
4883 * Try to locklessly grab a page and fall back to the object lock if NOCREAT
4889 vm_page_t m; in vm_page_grab_unlocked() local
4892 m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags); in vm_page_grab_unlocked()
4893 if (m == PAGE_NOT_ACQUIRED) in vm_page_grab_unlocked()
4895 if (m != NULL) in vm_page_grab_unlocked()
4896 return (m); in vm_page_grab_unlocked()
4899 * The radix lockless lookup should never return a false negative in vm_page_grab_unlocked()
4901 * was no page present at the instant of the call. A NOCREAT caller in vm_page_grab_unlocked()
4908 m = vm_page_grab(object, pindex, allocflags); in vm_page_grab_unlocked()
4911 return (m); in vm_page_grab_unlocked()
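/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  The unlocked
 * grab above relies on the lockless radix lookup never returning a false
 * negative: if it finds nothing and the caller may create a page, the
 * code retries under the object lock, where lookup and allocation are
 * atomic with respect to each other.  A schematic of that fallback (both
 * callbacks are placeholders for the real primitives):
 */
#include <stdbool.h>

struct page;

static struct page *
grab_fallback_model(unsigned long pindex, bool nocreat,
    struct page *(*lookup_lockless)(unsigned long),
    struct page *(*grab_locked)(unsigned long))
{
        struct page *p;

        p = lookup_lockless(pindex);
        if (p != NULL || nocreat)
                return (p);     /* found it, or absence is a final answer */
        /* Absent and creation allowed: redo the lookup under the lock. */
        return (grab_locked(pindex));
}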
4915 * Grab a page and make it valid, paging in if necessary. Use an iterator
4917 * a VM_ALLOC_COUNT is supplied and the page is not valid as many as
4919 * will be left on a paging queue but will neither be wired nor busy regardless
4926 vm_page_t m; in vm_page_grab_valid_iter() local
4942 if ((m = vm_radix_iter_lookup(pages, pindex)) != NULL) { in vm_page_grab_valid_iter()
4949 * However, we will not end up with an invalid page and a in vm_page_grab_valid_iter()
4952 if (!vm_page_trybusy(m, in vm_page_grab_valid_iter()
4953 vm_page_all_valid(m) ? allocflags : 0)) { in vm_page_grab_valid_iter()
4954 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt", in vm_page_grab_valid_iter()
4959 if (vm_page_all_valid(m)) in vm_page_grab_valid_iter()
4962 vm_page_busy_release(m); in vm_page_grab_valid_iter()
4970 m = vm_page_alloc_iter(object, pindex, pflags, pages); in vm_page_grab_valid_iter()
4971 if (m == NULL) { in vm_page_grab_valid_iter()
4980 vm_page_assert_xbusied(m); in vm_page_grab_valid_iter()
4985 ma[0] = m; in vm_page_grab_valid_iter()
4988 m = vm_radix_iter_lookup_ge(pages, pindex + i); in vm_page_grab_valid_iter()
4990 if (m != NULL) in vm_page_grab_valid_iter()
4991 ahead = MIN(ahead, m->pindex - pindex); in vm_page_grab_valid_iter()
4998 if (m == NULL || m->pindex != pindex + i || in vm_page_grab_valid_iter()
4999 vm_page_any_valid(m) || !vm_page_tryxbusy(m)) in vm_page_grab_valid_iter()
5001 ma[i] = m; in vm_page_grab_valid_iter()
5010 /* Pager may have replaced a page. */ in vm_page_grab_valid_iter()
5011 m = ma[0]; in vm_page_grab_valid_iter()
5024 MPASS(vm_page_all_valid(m)); in vm_page_grab_valid_iter()
5026 vm_page_zero_invalid(m, TRUE); in vm_page_grab_valid_iter()
5031 vm_page_wire(m); in vm_page_grab_valid_iter()
5032 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m)) in vm_page_grab_valid_iter()
5033 vm_page_busy_downgrade(m); in vm_page_grab_valid_iter()
5035 vm_page_busy_release(m); in vm_page_grab_valid_iter()
5036 *mp = m; in vm_page_grab_valid_iter()
5041 * Grab a page and make it valid, paging in if necessary. Pages missing from
5042 * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
5044 * in simultaneously. Additional pages will be left on a paging queue but
5060 * Grab a page. Keep on waiting, as long as the page exists in the object. If
5072 vm_page_t m; in vm_page_grab_zero_partial() local
5078 KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base, in vm_page_grab_zero_partial()
5083 while ((m = vm_page_grab_lookup( in vm_page_grab_zero_partial()
5087 m = vm_page_alloc_iter(object, pindex, in vm_page_grab_zero_partial()
5089 if (m != NULL) { in vm_page_grab_zero_partial()
5092 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); in vm_page_grab_zero_partial()
5096 vm_page_free(m); in vm_page_grab_zero_partial()
5106 vm_page_launder(m); in vm_page_grab_zero_partial()
5111 pmap_zero_page_area(m, base, end - base); in vm_page_grab_zero_partial()
5112 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m)); in vm_page_grab_zero_partial()
5113 vm_page_set_dirty(m); in vm_page_grab_zero_partial()
5114 vm_page_xunbusy(m); in vm_page_grab_zero_partial()
5119 * Locklessly grab a valid page. If the page is not valid or not yet
5126 vm_page_t m; in vm_page_grab_valid_unlocked() local
5139 * Attempt a lockless lookup and busy. We need at least an sbusy in vm_page_grab_valid_unlocked()
5140 * before we can inspect the valid field and return a wired page. in vm_page_grab_valid_unlocked()
5144 m = vm_page_acquire_unlocked(object, pindex, NULL, flags); in vm_page_grab_valid_unlocked()
5145 if (m == PAGE_NOT_ACQUIRED) in vm_page_grab_valid_unlocked()
5147 if (m != NULL) { in vm_page_grab_valid_unlocked()
5148 if (vm_page_all_valid(m)) { in vm_page_grab_valid_unlocked()
5150 vm_page_wire(m); in vm_page_grab_valid_unlocked()
5151 vm_page_grab_release(m, allocflags); in vm_page_grab_valid_unlocked()
5152 *mp = m; in vm_page_grab_valid_unlocked()
5155 vm_page_busy_release(m); in vm_page_grab_valid_unlocked()
5170 * page offset within the range, if a page already exists within the object
5187 * VM_ALLOC_NODUMP do not include the pages in a kernel core dump
5197 * may return a partial prefix of the requested range.
5204 vm_page_t m; in vm_page_grab_pages() local
5219 ahead = -1; in vm_page_grab_pages()
5223 &pages, pindex + i, &ma[i], count - i); in vm_page_grab_pages()
5225 if (ahead-- > 0) { in vm_page_grab_pages()
5226 m = ma[i]; in vm_page_grab_pages()
5227 if (!vm_page_tryacquire(m, allocflags)) { in vm_page_grab_pages()
5228 if (vm_page_grab_sleep(object, m, pindex + i, in vm_page_grab_pages()
5238 m = vm_page_alloc_iter(object, pindex + i, in vm_page_grab_pages()
5239 pflags | VM_ALLOC_COUNT(count - i), &pages); in vm_page_grab_pages()
5241 if (m == NULL) { in vm_page_grab_pages()
5247 ma[i] = m; in vm_page_grab_pages()
5249 if (vm_page_none_valid(m) && in vm_page_grab_pages()
5251 if ((m->flags & PG_ZERO) == 0) in vm_page_grab_pages()
5252 pmap_zero_page(m); in vm_page_grab_pages()
5253 vm_page_valid(m); in vm_page_grab_pages()
5255 vm_page_grab_release(m, allocflags); in vm_page_grab_pages()
5268 vm_page_t m; in vm_page_grab_pages_unlocked() local
5282 num_fetched = vm_radix_lookup_range_unlocked(&object->rtree, pindex, in vm_page_grab_pages_unlocked()
5285 m = vm_page_acquire_unlocked(object, pindex, ma[i], flags); in vm_page_grab_pages_unlocked()
5286 if (m == PAGE_NOT_ACQUIRED) in vm_page_grab_pages_unlocked()
5288 if (m == NULL) in vm_page_grab_pages_unlocked()
5290 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) { in vm_page_grab_pages_unlocked()
5291 if ((m->flags & PG_ZERO) == 0) in vm_page_grab_pages_unlocked()
5292 pmap_zero_page(m); in vm_page_grab_pages_unlocked()
5293 vm_page_valid(m); in vm_page_grab_pages_unlocked()
5295 /* m will still be wired or busy according to flags. */ in vm_page_grab_pages_unlocked()
5296 vm_page_grab_release(m, allocflags); in vm_page_grab_pages_unlocked()
5298 ma[i] = m; in vm_page_grab_pages_unlocked()
5302 count -= i; in vm_page_grab_pages_unlocked()
5311 * Mapping function for valid or dirty bits in a page.
5313 * Inputs are required to range within a page.
5330 last_bit = (base + size - 1) >> DEV_BSHIFT; in vm_page_bits()
5332 return (((vm_page_bits_t)2 << last_bit) - in vm_page_bits()
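/*
 * [Editor's worked example -- not part of vm_page.c.]  The mask built
 * above has one bit per DEV_BSIZE'd block touched by [base, base + size).
 * With DEV_BSIZE 512, base 1024 and size 1536 cover blocks 2 through 4,
 * so the result is (2 << 4) - (1 << 2) == 0x1c.  A standalone check:
 */
#include <stdint.h>
#include <stdio.h>

#define DEV_BSHIFT      9       /* log2(512) */

static uint64_t
page_bits(int base, int size)
{
        int first_bit, last_bit;

        if (size == 0)
                return (0);
        first_bit = base >> DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEV_BSHIFT;
        return (((uint64_t)2 << last_bit) - ((uint64_t)1 << first_bit));
}

int
main(void)
{
        printf("%#jx\n", (uintmax_t)page_bits(1024, 1536));     /* 0x1c */
        return (0);
}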
5337 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set) in vm_page_bits_set() argument
5354 * Use a trick to perform a 32-bit atomic on the in vm_page_bits_set()
5358 shift = addr & (sizeof(uint32_t) - 1); in vm_page_bits_set()
5360 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; in vm_page_bits_set()
5364 addr &= ~(sizeof(uint32_t) - 1); in vm_page_bits_set()
5370 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear) in vm_page_bits_clear() argument
5387 * Use a trick to perform a 32-bit atomic on the in vm_page_bits_clear()
5391 shift = addr & (sizeof(uint32_t) - 1); in vm_page_bits_clear()
5393 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; in vm_page_bits_clear()
5397 addr &= ~(sizeof(uint32_t) - 1); in vm_page_bits_clear()
5403 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits) in vm_page_bits_swap() argument
5436 * Use a trick to perform a 32-bit atomic on the in vm_page_bits_swap()
5440 shift = addr & (sizeof(uint32_t) - 1); in vm_page_bits_swap()
5442 shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; in vm_page_bits_swap()
5446 addr &= ~(sizeof(uint32_t) - 1); in vm_page_bits_swap()
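/*
 * [Editor's illustrative sketch -- not part of vm_page.c.]  When the
 * valid/dirty field is narrower than 32 bits, the helpers above round its
 * address down to the containing aligned 32-bit word and shift the bits
 * into place so an ordinary 32-bit atomic can be used.  A userspace model
 * for a 16-bit field on a little-endian host, using the GCC/Clang
 * __atomic builtins (the kernel version also adjusts the shift for
 * big-endian machines); it assumes the bytes around the field belong to
 * the same object, as they do inside struct vm_page:
 */
#include <stdint.h>

static void
bits16_atomic_set(uint16_t *bits, uint16_t set)
{
        uintptr_t addr;
        int shift;

        addr = (uintptr_t)bits;
        shift = (addr & (sizeof(uint32_t) - 1)) * 8;    /* little-endian */
        addr &= ~(uintptr_t)(sizeof(uint32_t) - 1);     /* align down */
        __atomic_fetch_or((uint32_t *)addr, (uint32_t)set << shift,
            __ATOMIC_SEQ_CST);
}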
5461 * Sets portions of a page valid. The arguments are expected
5469 vm_page_set_valid_range(vm_page_t m, int base, int size) in vm_page_set_valid_range() argument
5474 vm_page_assert_busied(m); in vm_page_set_valid_range()
5480 * bit is clear, we have to zero out a portion of the in vm_page_set_valid_range()
5484 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) in vm_page_set_valid_range()
5485 pmap_zero_page_area(m, frag, base - frag); in vm_page_set_valid_range()
5489 * valid bit is clear, we have to zero out a portion of in vm_page_set_valid_range()
5494 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) in vm_page_set_valid_range()
5495 pmap_zero_page_area(m, endoff, in vm_page_set_valid_range()
5496 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); in vm_page_set_valid_range()
5502 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, in vm_page_set_valid_range()
5503 ("vm_page_set_valid_range: page %p is dirty", m)); in vm_page_set_valid_range()
5509 if (vm_page_xbusied(m)) in vm_page_set_valid_range()
5510 m->valid |= pagebits; in vm_page_set_valid_range()
5512 vm_page_bits_set(m, &m->valid, pagebits); in vm_page_set_valid_range()
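/*
 * [Editor's worked example -- not part of vm_page.c.]  With DEV_BSIZE
 * 512, a call with base 100 and size 900 marks bytes [100, 1000) valid.
 * Block 0 gains its head fragment [0, 100) and block 1 its tail fragment
 * [1000, 1024); if those blocks were not already valid, the fragments are
 * zeroed first so that whole blocks can be declared valid.  A standalone
 * computation of the two fragments:
 */
#include <stdio.h>

#define DEV_BSIZE       512

int
main(void)
{
        int base = 100, size = 900;
        int frag, endoff, tail_end;

        frag = base & ~(DEV_BSIZE - 1);                 /* 0 */
        endoff = base + size;                           /* 1000 */
        tail_end = endoff + DEV_BSIZE - (endoff & (DEV_BSIZE - 1));
        printf("zero head [%d, %d), zero tail [%d, %d)\n",
            frag, base, endoff, tail_end);              /* [0,100) [1000,1024) */
        return (0);
}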
5520 vm_page_set_dirty(vm_page_t m) in vm_page_set_dirty() argument
5524 VM_PAGE_OBJECT_BUSY_ASSERT(m); in vm_page_set_dirty()
5526 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) { in vm_page_set_dirty()
5527 old = m->dirty; in vm_page_set_dirty()
5528 m->dirty = VM_PAGE_BITS_ALL; in vm_page_set_dirty()
5530 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL); in vm_page_set_dirty()
5531 if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0) in vm_page_set_dirty()
5532 vm_pager_page_unswapped(m); in vm_page_set_dirty()
5541 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) in vm_page_clear_dirty_mask() argument
5544 vm_page_assert_busied(m); in vm_page_clear_dirty_mask()
5549 * layer can call vm_page_dirty() without holding a distinguished in vm_page_clear_dirty_mask()
5553 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) in vm_page_clear_dirty_mask()
5554 m->dirty &= ~pagebits; in vm_page_clear_dirty_mask()
5556 vm_page_bits_clear(m, &m->dirty, pagebits); in vm_page_clear_dirty_mask()
5562 * Sets portions of a page valid and clean. The arguments are expected
5570 vm_page_set_validclean(vm_page_t m, int base, int size) in vm_page_set_validclean() argument
5575 vm_page_assert_busied(m); in vm_page_set_validclean()
5581 * bit is clear, we have to zero out a portion of the in vm_page_set_validclean()
5585 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) in vm_page_set_validclean()
5586 pmap_zero_page_area(m, frag, base - frag); in vm_page_set_validclean()
5590 * valid bit is clear, we have to zero out a portion of in vm_page_set_validclean()
5595 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) in vm_page_set_validclean()
5596 pmap_zero_page_area(m, endoff, in vm_page_set_validclean()
5597 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); in vm_page_set_validclean()
5602 * use this opportunity to clear the PGA_NOSYNC flag. If a process in vm_page_set_validclean()
5603 * takes a write fault on a MAP_NOSYNC memory area the flag will in vm_page_set_validclean()
5610 oldvalid = m->valid; in vm_page_set_validclean()
5612 if (vm_page_xbusied(m)) in vm_page_set_validclean()
5613 m->valid |= pagebits; in vm_page_set_validclean()
5615 vm_page_bits_set(m, &m->valid, pagebits); in vm_page_set_validclean()
5617 if ((frag = base & (DEV_BSIZE - 1)) != 0) { in vm_page_set_validclean()
5618 frag = DEV_BSIZE - frag; in vm_page_set_validclean()
5620 size -= frag; in vm_page_set_validclean()
5624 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); in vm_page_set_validclean()
5635 * a concurrent pmap operation, such as in vm_page_set_validclean()
5636 * pmap_protect(), could clear a modification in the in vm_page_set_validclean()
5641 pmap_clear_modify(m); in vm_page_set_validclean()
5642 m->dirty = 0; in vm_page_set_validclean()
5643 vm_page_aflag_clear(m, PGA_NOSYNC); in vm_page_set_validclean()
5644 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m)) in vm_page_set_validclean()
5645 m->dirty &= ~pagebits; in vm_page_set_validclean()
5647 vm_page_clear_dirty_mask(m, pagebits); in vm_page_set_validclean()
5651 vm_page_clear_dirty(vm_page_t m, int base, int size) in vm_page_clear_dirty() argument
5654 vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); in vm_page_clear_dirty()
5660 * Invalidates DEV_BSIZE'd chunks within a page. Both the
5664 vm_page_set_invalid(vm_page_t m, int base, int size) in vm_page_set_invalid() argument
5671 * read-only while we're in the process of invalidating them. in vm_page_set_invalid()
5673 object = m->object; in vm_page_set_invalid()
5675 vm_page_assert_busied(m); in vm_page_set_invalid()
5677 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + in vm_page_set_invalid()
5678 size >= object->un_pager.vnp.vnp_size) in vm_page_set_invalid()
5682 if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0) in vm_page_set_invalid()
5683 pmap_remove_all(m); in vm_page_set_invalid()
5684 KASSERT((bits == 0 && vm_page_all_valid(m)) || in vm_page_set_invalid()
5685 !pmap_page_is_mapped(m), in vm_page_set_invalid()
5686 ("vm_page_set_invalid: page %p is mapped", m)); in vm_page_set_invalid()
5687 if (vm_page_xbusied(m)) { in vm_page_set_invalid()
5688 m->valid &= ~bits; in vm_page_set_invalid()
5689 m->dirty &= ~bits; in vm_page_set_invalid()
5691 vm_page_bits_clear(m, &m->valid, bits); in vm_page_set_invalid()
5692 vm_page_bits_clear(m, &m->dirty, bits); in vm_page_set_invalid()
5701 * against concurrent read-only pmap enter which is done without
5705 vm_page_invalid(vm_page_t m) in vm_page_invalid() argument
5708 vm_page_assert_busied(m); in vm_page_invalid()
5709 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_page_invalid()
5710 MPASS(!pmap_page_is_mapped(m)); in vm_page_invalid()
5712 if (vm_page_xbusied(m)) in vm_page_invalid()
5713 m->valid = 0; in vm_page_invalid()
5715 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL); in vm_page_invalid()
5721 * The kernel assumes that the invalid portions of a page contain
5723 * When this occurs, we must zero out the non-valid portions of the
5726 * Pages are most often semi-valid when the end of a file is mapped
5730 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) in vm_page_zero_invalid() argument
5737 * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the in vm_page_zero_invalid()
5743 (m->valid & ((vm_page_bits_t)1 << i))) { in vm_page_zero_invalid()
5745 pmap_zero_page_area(m, in vm_page_zero_invalid()
5746 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); in vm_page_zero_invalid()
5758 vm_page_valid(m); in vm_page_zero_invalid()
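/*
 * [Editor's worked example -- not part of vm_page.c.]  The loop above
 * walks the valid bits and zeroes each maximal run of invalid DEV_BSIZE'd
 * blocks with a single call.  For an 8-block (4K) page with valid bits
 * 0x99 (blocks 0, 3, 4 and 7 valid), the invalid runs are blocks 1-2 and
 * 5-6, so [512, 1536) and [2560, 3584) get zeroed.  A standalone trace:
 */
#include <stdint.h>
#include <stdio.h>

#define DEV_BSHIFT      9
#define NBLOCKS         8       /* 4096-byte page / 512-byte blocks */

static void
zero_invalid_runs(uint8_t valid)
{
        int b, i;

        for (b = i = 0; i <= NBLOCKS; i++) {
                if (i == NBLOCKS || (valid & (1u << i)) != 0) {
                        if (i > b)
                                printf("zero [%d, %d)\n",
                                    b << DEV_BSHIFT, i << DEV_BSHIFT);
                        b = i + 1;
                }
        }
}

int
main(void)
{
        zero_invalid_runs(0x99);        /* prints [512,1536) and [2560,3584) */
        return (0);
}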
5770 * hold a busy lock to prevent invalidation.
5773 vm_page_is_valid(vm_page_t m, int base, int size) in vm_page_is_valid() argument
5778 return (vm_page_any_valid(m) && (m->valid & bits) == bits); in vm_page_is_valid()
5786 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m) in vm_page_ps_test() argument
5791 object = m->object; in vm_page_ps_test()
5792 if (skip_m != NULL && skip_m->object != object) in vm_page_ps_test()
5795 KASSERT(psind <= m->psind, in vm_page_ps_test()
5796 ("psind %d > psind %d of m %p", psind, m->psind, m)); in vm_page_ps_test()
5800 * The physically contiguous pages that make up a superpage, i.e., a in vm_page_ps_test()
5801 * page with a page size index ("psind") greater than zero, will in vm_page_ps_test()
5806 if (m[i].object != object) in vm_page_ps_test()
5808 if (&m[i] == skip_m) in vm_page_ps_test()
5810 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) in vm_page_ps_test()
5816 * "false". However, that would require a write lock in vm_page_ps_test()
5817 * on the object containing "m[i]". in vm_page_ps_test()
5819 if (m[i].dirty != VM_PAGE_BITS_ALL) in vm_page_ps_test()
5823 m[i].valid != VM_PAGE_BITS_ALL) in vm_page_ps_test()
5833 vm_page_test_dirty(vm_page_t m) in vm_page_test_dirty() argument
5836 vm_page_assert_busied(m); in vm_page_test_dirty()
5837 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) in vm_page_test_dirty()
5838 vm_page_dirty(m); in vm_page_test_dirty()
5842 vm_page_valid(vm_page_t m) in vm_page_valid() argument
5845 vm_page_assert_busied(m); in vm_page_valid()
5846 if (vm_page_xbusied(m)) in vm_page_valid()
5847 m->valid = VM_PAGE_BITS_ALL; in vm_page_valid()
5849 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); in vm_page_valid()
5854 vm_page_object_busy_assert(vm_page_t m) in vm_page_object_busy_assert() argument
5859 * holder of a page or object busy. in vm_page_object_busy_assert()
5861 if (m->object != NULL && !vm_page_busied(m)) in vm_page_object_busy_assert()
5862 VM_OBJECT_ASSERT_BUSY(m->object); in vm_page_object_busy_assert()
5866 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits) in vm_page_assert_pga_writeable() argument
5877 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_page_assert_pga_writeable()
5879 if (!vm_page_xbusied(m)) in vm_page_assert_pga_writeable()
5880 VM_OBJECT_ASSERT_BUSY(m->object); in vm_page_assert_pga_writeable()
5924 vm_page_t m; in DB_SHOW_COMMAND() local
5935 m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); in DB_SHOW_COMMAND()
5937 m = PHYS_TO_VM_PAGE(addr); in DB_SHOW_COMMAND()
5939 m = (vm_page_t)addr; in DB_SHOW_COMMAND()
5943 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, in DB_SHOW_COMMAND()
5944 m->a.queue, m->ref_count, m->a.flags, m->oflags, in DB_SHOW_COMMAND()
5945 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty); in DB_SHOW_COMMAND()