Lines Matching +full:is +full:- +full:wired

1 /*-
2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
12 * This code is derived from software contributed to Berkeley by
13 * The Mach Operating System project at Carnegie-Mellon University.
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
50 * its documentation is hereby granted, provided that both the copyright
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
64 * Pittsburgh PA 15213-3890
125 boolean_t wired;
141 /* Top-level map object. */
218 VM_OBJECT_ASSERT_WLOCKED(m->object);
228 * Return true if a vm_pager_get_pages() call is needed in order to check
238 return ((object->flags & OBJ_SWAP) == 0 ||
239 !pctrie_is_empty(&object->un_pager.swp.swp_blks));
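
A minimal user-space sketch of the predicate at lines 238-239, assuming stand-in types and flag values (OBJ_SWAP's value, the swap-block trie, and the struct layout below are placeholders, not the kernel definitions): a pager round trip can only be skipped when the object is swap-backed and currently has no swap blocks recorded.

    #include <stdbool.h>

    #define OBJ_SWAP 0x1                      /* stand-in flag value */

    struct swblk_trie { int nnodes; };        /* placeholder for the pctrie */
    struct object {
        unsigned flags;
        struct swblk_trie swp_blks;
    };

    static bool trie_is_empty(const struct swblk_trie *t) { return (t->nnodes == 0); }

    /* A vm_pager_get_pages() call is needed unless the object is swap-backed
     * and has nothing stored in its swap trie. */
    static bool
    needs_getpages(const struct object *obj)
    {
        return ((obj->flags & OBJ_SWAP) == 0 || !trie_is_empty(&obj->swp_blks));
    }
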
246 if (fs->lookup_still_valid) {
247 vm_map_lookup_done(fs->map, fs->entry);
248 fs->lookup_still_valid = false;
256 if (fs->vp != NULL) {
257 vput(fs->vp);
258 fs->vp = NULL;
266 vm_fault_page_release(&fs->m_cow);
267 vm_fault_page_release(&fs->m);
268 vm_object_pip_wakeup(fs->object);
269 if (fs->object != fs->first_object) {
270 VM_OBJECT_WLOCK(fs->first_object);
271 vm_fault_page_free(&fs->first_m);
272 VM_OBJECT_WUNLOCK(fs->first_object);
273 vm_object_pip_wakeup(fs->first_object);
275 vm_object_deallocate(fs->first_object);
284 VM_OBJECT_UNLOCK(fs->object);
293 if (((fs->prot & VM_PROT_WRITE) == 0 &&
294 (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
295 (m->oflags & VPO_UNMANAGED) != 0)
300 need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
301 (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
302 (fs->fault_flags & VM_FAULT_DIRTY) != 0;
304 vm_object_set_writeable_dirty(m->object);
307 * If the fault is a write, we know that this page is being
311 * Also, since the page is now dirty, we can possibly tell
316 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
317 * if the page is already dirty to prevent data written with
323 if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
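
A compact sketch of the dirty-tracking decision described at lines 300-323, with stand-in constants (the flag values and the helper below are illustrative, not the kernel's): the page is dirtied when the fault actually writes and is not merely wiring the mapping, or when VM_FAULT_DIRTY was requested; the NOSYNC hint is then only adjusted for NOSYNC map entries when the page was not already dirty.

    #include <stdbool.h>

    #define VM_PROT_WRITE   0x2               /* stand-in value */
    #define VM_FAULT_WIRE   0x1               /* stand-in value */
    #define VM_FAULT_DIRTY  0x2               /* stand-in value */

    /* Dirty the page now when the fault writes data (and is not a plain
     * wiring request), or when the caller explicitly asked for it. */
    static bool
    fault_needs_dirty(int fault_type, int fault_flags)
    {
        return (((fault_type & VM_PROT_WRITE) != 0 &&
            (fault_flags & VM_FAULT_WIRE) == 0) ||
            (fault_flags & VM_FAULT_DIRTY) != 0);
    }
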
345 MPASS(fs->vp == NULL);
348 * If we fail, the vast majority of the time it is because the page is not
354 m = vm_page_lookup_unlocked(fs->first_object, fs->first_pindex);
356 ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m))) {
357 VM_OBJECT_WLOCK(fs->first_object);
361 vaddr = fs->vaddr;
363 VM_OBJECT_RLOCK(fs->first_object);
366 * Now that we have stabilized the state, revalidate that the page is in the shape
370 if (m->object != fs->first_object || m->pindex != fs->first_pindex)
373 vm_object_busy(fs->first_object);
376 ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)))
382 if ((m->flags & PG_FICTITIOUS) == 0 &&
384 psind = m_super->psind;
388 if ((fs->prot & VM_PROT_WRITE) != 0) {
396 if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
399 while (rounddown2(vaddr, pagesizes[psind]) < fs->entry->start ||
400 roundup2(vaddr + 1, pagesizes[psind]) > fs->entry->end ||
401 (vaddr & (pagesizes[psind] - 1)) !=
402 (VM_PAGE_TO_PHYS(m) & (pagesizes[psind] - 1)) ||
404 !pmap_ps_enabled(fs->map->pmap)) {
405 psind--;
408 m_super += rounddown2(m - m_super,
410 KASSERT(m_super->psind >= psind,
411 ("psind %d of m_super %p < %d", m_super->psind,
419 fs->fault_type |= VM_PROT_WRITE;
423 if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
424 PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
427 if (fs->m_hold != NULL) {
428 (*fs->m_hold) = m;
431 if (psind == 0 && !fs->wired)
433 VM_OBJECT_RUNLOCK(fs->first_object);
435 vm_object_unbusy(fs->first_object);
436 vm_map_lookup_done(fs->map, fs->entry);
437 curthread->td_ru.ru_minflt++;
440 vm_object_unbusy(fs->first_object);
442 if (!VM_OBJECT_TRYUPGRADE(fs->first_object)) {
443 VM_OBJECT_RUNLOCK(fs->first_object);
444 VM_OBJECT_WLOCK(fs->first_object);
453 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
454 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
456 if (!vm_map_trylock_read(fs->map)) {
457 VM_OBJECT_WUNLOCK(fs->first_object);
458 vm_map_lock_read(fs->map);
459 VM_OBJECT_WLOCK(fs->first_object);
461 fs->lookup_still_valid = true;
469 * Check each page to ensure that the pager is obeying the
489 KASSERT(m != NULL && m->pindex == pidx,
506 MPASS(fs->object == fs->first_object);
507 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
508 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
509 MPASS(fs->first_object->backing_object == NULL);
510 MPASS(fs->lookup_still_valid);
512 pager_first = OFF_TO_IDX(fs->entry->offset);
513 pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
522 * There is no guarantee that the method will be called again
523 * if the current fault is for read, and a future fault is
527 rv = vm_pager_populate(fs->first_object, fs->first_pindex,
528 fs->fault_type, fs->entry->max_protection, &pager_first,
531 VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
534 * VM_PAGER_BAD is the backdoor for a pager to request
538 if (fs->map->timestamp != fs->map_generation)
545 /* Ensure that the driver is obeying the interface. */
547 MPASS(fs->first_pindex <= pager_last);
548 MPASS(fs->first_pindex >= pager_first);
549 MPASS(pager_last < fs->first_object->size);
552 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(fs->entry);
553 if (fs->map->timestamp != fs->map_generation) {
555 vm_fault_populate_cleanup(fs->first_object, pager_first,
558 m = vm_page_lookup(fs->first_object, pager_first);
559 if (m != fs->m)
566 * The map is unchanged after our last unlock. Process the fault.
574 m = vm_page_lookup(fs->first_object, pager_first);
576 VM_OBJECT_WUNLOCK(fs->first_object);
577 vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
578 fs->entry->offset;
580 KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
582 (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
583 (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
584 KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
587 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
588 fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
590 VM_OBJECT_WLOCK(fs->first_object);
596 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
600 if (fs->m_hold != NULL) {
601 *fs->m_hold = m + (fs->first_pindex - pager_first);
602 vm_page_wire(*fs->m_hold);
608 * The range [pager_first, pager_last] that is given to the
609 * pager is only a hint. The pager may populate any range
614 map_first = OFF_TO_IDX(fs->entry->offset);
616 vm_fault_populate_cleanup(fs->first_object, pager_first,
617 map_first - 1);
620 map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
622 vm_fault_populate_cleanup(fs->first_object, map_last + 1,
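
As the comment at lines 608-609 notes, the pager may populate a wider index range than the map entry actually maps, so the range is clamped to the entry and the excess pages are released. A hedged arithmetic sketch, with OFF_TO_IDX/atop reduced to a byte-to-page shift and 4 KB pages assumed:

    #include <stdint.h>

    #define PAGE_SHIFT 12                         /* assume 4 KB pages */
    #define atop(x)    ((uintmax_t)(x) >> PAGE_SHIFT)

    /* Clamp the pager-populated pindex range [*pfirst, *plast] to the part
     * covered by the map entry [entry_start, entry_end) at entry_offset.
     * Pages left outside the clamped range are what the cleanup calls in
     * the listing release. */
    static void
    clamp_to_entry(uintmax_t entry_start, uintmax_t entry_end,
        uintmax_t entry_offset, uintmax_t *pfirst, uintmax_t *plast)
    {
        uintmax_t map_first = atop(entry_offset);
        uintmax_t map_last = map_first + atop(entry_end - entry_start) - 1;

        if (*pfirst < map_first)
            *pfirst = map_first;
        if (*plast > map_last)
            *plast = map_last;
    }
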
626 for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
628 pidx += npages, m = TAILQ_NEXT(&m[npages - 1], listq)) {
629 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
630 KASSERT(m != NULL && m->pindex == pidx,
632 psind = m->psind;
633 while (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
634 pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
635 !pmap_ps_enabled(fs->map->pmap)))
636 psind--;
643 VM_OBJECT_WUNLOCK(fs->first_object);
644 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
645 (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
652 * key. Revert to single-page mappings if this happens.
658 MPASS(!fs->wired);
660 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
661 &m[i], fs->prot, fs->fault_type, 0);
666 VM_OBJECT_WLOCK(fs->first_object);
668 if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
669 m[i].pindex == fs->first_pindex)
673 if (fs->m_hold != NULL &&
674 m[i].pindex == fs->first_pindex) {
675 (*fs->m_hold) = &m[i];
682 curthread->td_ru.ru_majflt++;
702 * If successful, the page is inserted into the
708 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
709 * a standard error specifying why the fault is fatal is returned.
756 * the images without the ABI-tag ELF
760 curproc->p_osrel >= P_OSREL_SIGSEGV) {
789 if (fs->object == fs->first_object)
790 VM_OBJECT_ASSERT_WLOCKED(fs->object);
792 if (!fs->can_read_lock) {
793 VM_OBJECT_ASSERT_WLOCKED(fs->object);
797 if (VM_OBJECT_WOWNED(fs->object))
800 if (VM_OBJECT_TRYUPGRADE(fs->object))
812 if (fs->object->type != OBJT_VNODE)
814 vp = fs->object->handle;
815 if (vp == fs->vp) {
816 ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
833 * paging-in-progress count incremented. Otherwise, we could
838 fs->vp = vp;
849 fs->vp = vp;
855 * Calculate the desired readahead. Handle drop-behind.
865 KASSERT(fs->lookup_still_valid, ("map unlocked"));
866 era = fs->entry->read_ahead;
867 behavior = vm_map_entry_behavior(fs->entry);
872 if (fs->vaddr == fs->entry->next_read)
873 vm_fault_dontneed(fs, fs->vaddr, nera);
874 } else if (fs->vaddr == fs->entry->next_read) {
876 * This is a sequential fault. Arithmetically
878 * the read-ahead window. The requested
879 * number of pages is "# of sequential faults
889 vm_fault_dontneed(fs, fs->vaddr, nera);
892 * This is a non-sequential fault.
901 fs->entry->read_ahead = nera;
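
The exact growth formula is not fully visible in the matched lines, so the following is only a sketch of the policy the comments describe, with illustrative constants: widen the read-ahead window across consecutive sequential faults, cap it at a maximum, and drop it back to zero on a non-sequential fault.

    /* Hedged sketch; READ_AHEAD_MIN/MAX and the growth rule are stand-ins,
     * not the kernel's VM_FAULT_READ_AHEAD_* values. */
    #define READ_AHEAD_MIN  4
    #define READ_AHEAD_MAX  64

    static int
    next_read_ahead(int era /* previous window, in pages */, int sequential)
    {
        int nera;

        if (!sequential)
            return (0);                 /* non-sequential fault: reset */
        nera = READ_AHEAD_MIN;
        if (era > 0)
            nera += era + 1;            /* widen across sequential faults */
        if (nera > READ_AHEAD_MAX)
            nera = READ_AHEAD_MAX;      /* cap the window */
        return (nera);
    }
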
912 KASSERT(!fs->lookup_still_valid,
914 result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
915 VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
916 &fs->first_pindex, &fs->prot, &fs->wired);
922 fs->map_generation = fs->map->timestamp;
924 if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
926 __func__, (u_long)fs->vaddr);
929 if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
930 fs->entry->wiring_thread != curthread) {
931 vm_map_unlock_read(fs->map);
932 vm_map_lock(fs->map);
933 if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
934 (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
936 fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
937 vm_map_unlock_and_wait(fs->map, 0);
939 vm_map_unlock(fs->map);
943 MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);
945 if (fs->wired)
946 fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
948 KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
949 ("!fs->wired && VM_FAULT_WIRE"));
950 fs->lookup_still_valid = true;
963 if (!vm_map_trylock_read(fs->map))
966 fs->lookup_still_valid = true;
967 if (fs->map->timestamp == fs->map_generation)
970 result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
971 &fs->entry, &retry_object, &retry_pindex, &retry_prot,
972 &fs->wired);
982 if (retry_object != fs->first_object ||
983 retry_pindex != fs->first_pindex)
989 * read to write permission is OK - we leave the page
990 * write-protected, and catch the write fault. Changing from
992 * write-enabled after all.
994 fs->prot &= retry_prot;
995 fs->fault_type &= retry_prot;
996 if (fs->prot == 0)
999 /* Reassert because wired may have changed. */
1000 KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
1001 ("!wired && VM_FAULT_WIRE"));
1011 KASSERT(fs->object != fs->first_object,
1027 fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
1031 fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
1035 (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
1036 fs->object == fs->first_object->backing_object &&
1037 VM_OBJECT_TRYWLOCK(fs->object)) {
1039 * Remove but keep xbusy for replace. fs->m is moved into
1040 * fs->first_object and left busy while fs->first_m is
1043 vm_page_remove_xbusy(fs->m);
1044 vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
1045 fs->first_m);
1046 vm_page_dirty(fs->m);
1051 vm_reserv_rename(fs->m, fs->first_object, fs->object,
1052 OFF_TO_IDX(fs->first_object->backing_object_offset));
1054 VM_OBJECT_WUNLOCK(fs->object);
1055 VM_OBJECT_WUNLOCK(fs->first_object);
1056 fs->first_m = fs->m;
1057 fs->m = NULL;
1061 VM_OBJECT_WUNLOCK(fs->first_object);
1065 pmap_copy_page(fs->m, fs->first_m);
1066 vm_page_valid(fs->first_m);
1067 if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
1068 vm_page_wire(fs->first_m);
1069 vm_page_unwire(fs->m, PQ_INACTIVE);
1073 * pmap_enter is complete.
1075 fs->m_cow = fs->m;
1076 fs->m = NULL;
1079 * Typically, the shadow object is either private to this
1083 * we need to ensure that any pmap-level mappings to the
1084 * original, copy-on-write page from the backing object are
1087 * The flag check is racy, but this is tolerable: if
1088 * OBJ_ONEMAPPING is cleared after the check, the busy state
1091 * address space. If OBJ_ONEMAPPING is set after the check,
1095 vm_page_assert_xbusied(fs->m_cow);
1096 if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
1097 pmap_remove_all(fs->m_cow);
1100 vm_object_pip_wakeup(fs->object);
1105 fs->object = fs->first_object;
1106 fs->pindex = fs->first_pindex;
1107 fs->m = fs->first_m;
1109 curthread->td_cow++;
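
Lines 1027-1076 cover the two copy-on-write outcomes: when the backing object is anonymous, singly referenced and shadowed only by this object, the busy page is simply renamed into the top-level object; otherwise its contents are copied into the newly allocated first_m and the original page is kept busy as m_cow until pmap_enter() completes. A user-space analogy of that move-versus-copy decision (the struct, the reference count, and cow_break() are illustrative stand-ins, not kernel code):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        unsigned char *data;
        size_t len;
        int refs;                       /* stand-in for object ref/shadow counts */
    };

    /* Give the writer a private copy of src.  With a single owner the
     * storage can simply be stolen (the "rename the page" case); otherwise
     * the contents must be duplicated (the pmap_copy_page() case). */
    static unsigned char *
    cow_break(struct buf *src)
    {
        unsigned char *priv;

        if (src->refs == 1) {
            priv = src->data;           /* move: no data copy needed */
            src->data = NULL;
            return (priv);
        }
        priv = malloc(src->len);
        if (priv != NULL)
            memcpy(priv, src->data, src->len);  /* copy the contents */
        return (priv);
    }
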
1117 if (fs->object == fs->first_object || !fs->can_read_lock)
1118 VM_OBJECT_ASSERT_WLOCKED(fs->object);
1120 VM_OBJECT_ASSERT_LOCKED(fs->object);
1126 * the next object. However, if this is the top-level
1132 if (fs->object == fs->first_object) {
1133 fs->first_m = fs->m;
1134 fs->m = NULL;
1135 } else if (fs->m != NULL) {
1137 fs->can_read_lock = false;
1141 vm_fault_page_free(&fs->m);
1148 next_object = fs->object->backing_object;
1151 MPASS(fs->first_m != NULL);
1152 KASSERT(fs->object != next_object, ("object loop %p", next_object));
1153 if (fs->can_read_lock)
1158 if (fs->object != fs->first_object)
1159 vm_object_pip_wakeup(fs->object);
1160 fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1161 VM_OBJECT_UNLOCK(fs->object);
1162 fs->object = next_object;
1175 if (fs->object != fs->first_object) {
1176 vm_object_pip_wakeup(fs->object);
1177 fs->object = fs->first_object;
1178 fs->pindex = fs->first_pindex;
1180 MPASS(fs->first_m != NULL);
1181 MPASS(fs->m == NULL);
1182 fs->m = fs->first_m;
1183 fs->first_m = NULL;
1188 if ((fs->m->flags & PG_ZERO) == 0) {
1189 pmap_zero_page(fs->m);
1194 vm_page_valid(fs->m);
1209 if (!fs->oom_started) {
1210 fs->oom_started = true;
1211 getmicrotime(&fs->oom_start_time);
1216 timevalsub(&now, &fs->oom_start_time);
1223 curproc->p_pid, curproc->p_comm);
1225 fs->oom_started = false;
1238 if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
1245 if (fs->pindex >= fs->object->size) {
1250 if (fs->object == fs->first_object &&
1251 (fs->first_object->flags & OBJ_POPULATE) != 0 &&
1252 fs->first_object->shadow_count == 0) {
1277 * daemon in an attempt to resolve an out-of-memory condition.
1279 * The unlocked read of the p_flag is harmless. At worst, the P_KILLED
1283 dset = fs->object->domain.dr_policy;
1285 dset = curthread->td_domain.dr_policy;
1286 if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
1288 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
1290 if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
1294 fs->m = vm_page_alloc(fs->object, fs->pindex,
1297 if (fs->m == NULL) {
1302 fs->oom_started = false;
1308 * Call the pager to retrieve the page if there is a chance
1325 * unlocking the map, using the saved addresses is
1328 e_start = fs->entry->start;
1329 e_end = fs->entry->end;
1330 behavior = vm_map_entry_behavior(fs->entry);
1343 if (fs->nera == -1 && !P_KILLED(curproc))
1344 fs->nera = vm_fault_readahead(fs);
1358 KASSERT(fs->vp == NULL || !vm_map_is_system(fs->map),
1359 ("vm_fault: vnode-backed object mapped by system map"));
1365 if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
1370 /* Is this a sequential fault? */
1371 if (fs->nera > 0) {
1373 ahead = fs->nera;
1376 * Request a cluster of pages that is
1380 * boundary is more likely to coincide
1385 cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
1387 atop(fs->vaddr - e_start));
1388 ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
1390 ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
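
The arithmetic at lines 1385-1390 sizes the read-ahead cluster so that it ends on a VM_FAULT_READ_DEFAULT-aligned pindex boundary and never extends past the map entry. A standalone sketch with an illustrative cluster size and 4 KB pages assumed; line 1387 is a continuation of a statement whose first half is not among the matches, so the "behind" computation below is partly inferred.

    #include <stdint.h>

    #define PAGE_SHIFT     12             /* assume 4 KB pages */
    #define atop(x)        ((uintmax_t)(x) >> PAGE_SHIFT)
    #define CLUSTER_PAGES  16             /* stand-in for VM_FAULT_READ_DEFAULT */

    static uintmax_t min_u(uintmax_t a, uintmax_t b) { return (a < b ? a : b); }

    /* Compute how many pages to read behind and ahead of the faulting page
     * so the cluster ends on a CLUSTER_PAGES-aligned pindex and stays inside
     * the entry.  Assumes page-aligned addresses with e_start <= vaddr < e_end. */
    static void
    cluster_window(uintmax_t pindex, uintmax_t vaddr, uintmax_t e_start,
        uintmax_t e_end, uintmax_t *behind, uintmax_t *ahead)
    {
        uintmax_t cluster_offset = pindex % CLUSTER_PAGES;

        *behind = min_u(cluster_offset, atop(vaddr - e_start));
        *ahead = CLUSTER_PAGES - 1 - cluster_offset;
        *ahead = min_u(*ahead, atop(e_end - vaddr) - 1);
    }
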
1394 rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
1399 curproc->p_pid, curproc->p_comm);
1406 VM_OBJECT_WLOCK(fs->object);
1407 vm_fault_page_free(&fs->m);
1417 * Wait/Retry if the page is busy. We have to do this if the page is
1419 * read busy for pageouts (and even pageins if it is the vnode pager),
1424 * is marked valid, but since such pages are typically already pmap'd,
1425 * putting that special case in might be more effort than it is worth.
1434 * sleeping so that the page daemon is less
1437 vm_page_aflag_set(fs->m, PGA_REFERENCED);
1438 if (fs->object != fs->first_object) {
1439 vm_fault_page_release(&fs->first_m);
1440 vm_object_pip_wakeup(fs->first_object);
1442 vm_object_pip_wakeup(fs->object);
1444 if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
1445 !vm_page_busy_sleep(fs->m, "vmpfw", 0))
1446 VM_OBJECT_UNLOCK(fs->object);
1448 vm_object_deallocate(fs->first_object);
1452 * Handle page lookup, populate, allocate, page-in for the current
1455 * The object is locked on entry and will remain locked with a return
1465 if (fs->object == fs->first_object || !fs->can_read_lock)
1466 VM_OBJECT_ASSERT_WLOCKED(fs->object);
1468 VM_OBJECT_ASSERT_LOCKED(fs->object);
1471 * If the object is marked for imminent termination, we retry
1475 if ((fs->object->flags & OBJ_DEAD) != 0) {
1476 dead = fs->object->type == OBJT_DEAD;
1485 * See if the page is resident.
1487 fs->m = vm_page_lookup(fs->object, fs->pindex);
1488 if (fs->m != NULL) {
1489 if (!vm_page_tryxbusy(fs->m)) {
1495 * The page is marked busy for other processes and the
1496 * pagedaemon. If it is still completely valid we are
1499 if (vm_page_all_valid(fs->m)) {
1500 VM_OBJECT_UNLOCK(fs->object);
1506 * Page is not resident. If the pager might contain the page
1507 * or this is the beginning of the search, allocate a new
1510 if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
1511 fs->object == fs->first_object)) {
1513 fs->can_read_lock = false;
1527 if (vm_fault_object_needs_getpages(fs->object)) {
1530 * or found an existing page that is only partially
1534 * page is exclusive busied. The exclusive busy
1536 * the object lock is dropped.
1538 VM_OBJECT_UNLOCK(fs->object);
1541 VM_OBJECT_WLOCK(fs->object);
1560 if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
1570 fs.nera = -1;
1590 * Try to avoid lock contention on the top-level object through
1591 * special-case handling of some types of page faults, specifically,
1592 * those that are mapping an existing page from the top-level object.
1597 (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
1611 * are messing with it. Once we have the reference, the map is free
1615 * Bump the paging-in-progress count to prevent size changes (e.g.
1629 if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
1677 * traverse into a backing object or zero fill if none is
1708 * If the page is being written, but isn't already owned by the
1709 * top-level object, we have to copy it into a new page owned by the
1710 * top-level object.
1719 * We only try to prefault read-only mappings to the
1720 * neighboring pages when this copy-on-write fault is
1722 * is typically wasted effort.
1754 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
1757 * Page must be completely valid or it is not fit to
1772 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
1773 fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
1775 fs.wired == 0)
1781 * If the page is not wired down, then put it where the pageout daemon
1801 curthread->td_ru.ru_majflt++;
1803 if (racct_enable && fs.object->type == OBJT_VNODE) {
1818 curthread->td_ru.ru_minflt++;
1828 * cross a cluster-size boundary. The cluster size is the greater of the
1831 * When "fs->first_object" is a shadow object, the pages in the backing object
1845 VM_OBJECT_ASSERT_UNLOCKED(fs->object);
1846 first_object = fs->first_object;
1848 if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
1854 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
1855 (entry = fs->entry)->start < end) {
1856 if (end - entry->start < size)
1857 start = entry->start;
1859 start = end - size;
1860 pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
1861 pstart = OFF_TO_IDX(entry->offset) + atop(start -
1862 entry->start);
1864 pend = OFF_TO_IDX(entry->offset) + atop(end -
1865 entry->start);
1866 while ((m = m_next) != NULL && m->pindex < pend) {
1881 * is in the inactive queue is racy; in the
1895 * page faults into a process's address space. It is a "cousin"
1912 pmap = fs->map->pmap;
1913 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
1916 entry = fs->entry;
1919 starta = entry->start;
1921 starta = addra - backward * PAGE_SIZE;
1922 if (starta < entry->start)
1923 starta = entry->start;
1925 prot = entry->protection;
1931 if ((fs->prot & VM_PROT_WRITE) != 0)
1937 * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra
1938 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
1943 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
1948 if (addr < starta || addr >= entry->end)
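
The expression at line 1943 walks outward from the faulting address, alternating one page below and one page above it, exactly as the comment at lines 1937-1938 describes. A tiny standalone program that prints that candidate sequence (PAGE_SIZE fixed at 4 KB and the starting address chosen arbitrarily for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096                  /* assume 4 KB pages */

    int
    main(void)
    {
        uintmax_t addra = 0x10000;          /* arbitrary faulting address */
        intmax_t addr;
        int i;

        for (i = 0; i < 6; i++) {
            /* Alternate below/above the fault, stepping one more page
             * outward every two iterations. */
            addr = (intmax_t)addra + ((i >> 1) + 1) *
                ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE);
            printf("prefault candidate %d: %#jx\n", i, (uintmax_t)addr);
        }
        return (0);
    }
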
1954 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1955 lobject = entry->object.vm_object;
1960 (backing_object = lobject->backing_object) != NULL) {
1961 KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
1963 pindex += lobject->backing_object_offset >> PAGE_SHIFT;
1965 if (!obj_locked || lobject != entry->object.vm_object)
1970 if (!obj_locked || lobject != entry->object.vm_object)
1975 (m->flags & PG_FICTITIOUS) == 0)
1977 if (!obj_locked || lobject != entry->object.vm_object)
1986 * pages are successfully held, then the number of held pages is returned
1988 * of the pages cannot be held, -1 is returned.
2005 return (-1);
2007 if (atop(end - addr) > max_count)
2009 count = atop(end - addr);
2012 * Most likely, the physical pages are resident in the pmap, so it is
2017 *mp = pmap_extract_and_hold(map->pmap, va, prot);
2021 (*mp)->dirty != VM_PAGE_BITS_ALL) {
2028 * The object lock is not held here.
2042 * i.e., TDP_NOFAULTING is set, we must not sleep nor
2050 (curthread->td_pflags & TDP_NOFAULTING) != 0)
2062 return (-1);
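
The fragments at lines 1986-2062 describe the contract of vm_fault_quick_hold_pages(): the byte range is converted to a page count, and the function returns the number of pages held or -1 when any page cannot be held. How an over-long request (more than max_count pages) is handled is not visible in the matched lines, so the sketch below simply rejects it; the page type and the hold_one() callback are stand-ins for the pmap/fault machinery.

    #include <stdint.h>

    #define PAGE_SHIFT 12                     /* assume 4 KB pages */
    #define atop(x)    ((uintmax_t)(x) >> PAGE_SHIFT)

    struct page;                              /* opaque stand-in */

    /* hold_one() stands in for pmap_extract_and_hold() plus the slow-path
     * fault; it returns NULL when the page cannot be held. */
    static int
    quick_hold(uintmax_t addr, uintmax_t end, struct page **ma, int max_count,
        struct page *(*hold_one)(uintmax_t va))
    {
        int count, i;

        if (end <= addr || atop(end - addr) > (uintmax_t)max_count)
            return (-1);
        count = (int)atop(end - addr);
        for (i = 0; i < count; i++) {
            ma[i] = hold_one(addr + ((uintmax_t)i << PAGE_SHIFT));
            if (ma[i] == NULL)
                return (-1);  /* a real implementation also unholds ma[0..i-1] */
        }
        return (count);
    }
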
2070 * underlying pages. When src_entry is equal to dst_entry, the function
2071 * implements COW for a wired-down map entry. Otherwise, it forks the
2072 * wired entry into dst_map.
2076 * The source map entry must be wired down (or be a sharing map
2077 * entry corresponding to a main map entry that is wired down).
2093 KASSERT(upgrade || dst_entry->object.vm_object == NULL,
2101 * A writeable large page mapping is only created if all of
2106 access = prot = dst_entry->protection;
2110 src_object = src_entry->object.vm_object;
2111 src_pindex = OFF_TO_IDX(src_entry->offset);
2113 if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2118 * Create the top-level object for the destination entry.
2119 * Doesn't actually shadow anything - we copy the pages
2122 dst_object = vm_object_allocate_anon(atop(dst_entry->end -
2123 dst_entry->start), NULL, NULL, 0);
2125 dst_object->flags |= OBJ_COLORED;
2126 dst_object->pg_color = atop(dst_entry->start);
2128 dst_object->domain = src_object->domain;
2129 dst_object->charge = dst_entry->end - dst_entry->start;
2131 dst_entry->object.vm_object = dst_object;
2132 dst_entry->offset = 0;
2133 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
2138 KASSERT(dst_entry->cred == NULL,
2140 dst_object->cred = curthread->td_ucred;
2141 crhold(dst_object->cred);
2142 *fork_charge += dst_object->charge;
2143 } else if ((dst_object->flags & OBJ_SWAP) != 0 &&
2144 dst_object->cred == NULL) {
2145 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
2147 dst_object->cred = dst_entry->cred;
2148 dst_entry->cred = NULL;
2154 * destination object. Since the source is wired, those pages
2155 * must exist. In contrast, the destination is pageable.
2160 for (vaddr = dst_entry->start, dst_pindex = 0;
2161 vaddr < dst_entry->end;
2166 * Because the source is wired down, the page will be
2174 (backing_object = object->backing_object) != NULL) {
2176 * Unless the source mapping is read-only or
2177 * it is presently being upgraded from
2178 * read-only, the first object in the shadow
2181 * executed when the source mapping is already
2184 KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
2189 pindex += OFF_TO_IDX(object->backing_object_offset);
2215 (object->flags & OBJ_ONEMAPPING) == 0)
2225 dst_m->dirty = dst_m->valid = src_m->valid;
2231 if (dst_m->pindex >= dst_object->size) {
2234 * out of bounds if the object type is
2243 * Enter it in the pmap. If a wired, copy-on-write
2244 * mapping is being replaced by a write-enabled
2251 * all copies of the wired map entry have similar
2256 pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
2270 ("dst_m %p is not wired", dst_m));
2279 dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
2285 * Block entry into the machine-independent layer's page fault handler by
2287 * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of