vm_fault.c: diff between 205be21d997d5becfaee5918386d4087b09f98a8 (old) and 0012f373e43db2341c20329163ed2d5ad3b0f341 (new)
/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman

--- 197 unchanged lines hidden ---

	bool need_dirty;

	if (((prot & VM_PROT_WRITE) == 0 &&
	    (fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_OBJECT_ASSERT_LOCKED(m->object);
+	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fault_flags & VM_FAULT_DIRTY) != 0;

	if (set_wd)
		vm_object_set_writeable_dirty(m->object);
	else

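The VM_PAGE_OBJECT_BUSY_ASSERT(m) line added above is new in this revision and its definition is not part of this file. As a rough, hypothetical sketch of the invariant it presumably enforces (the real macro lives elsewhere and may also accept a locked object), the change treats the page busy lock as what protects a page's valid/dirty state:

/*
 * Hypothetical sketch only; the _SKETCH name is invented here and this
 * is not the macro's actual body.  It expresses the assumed rule that a
 * page is busied before its valid/dirty state is updated.
 */
#define	VM_PAGE_OBJECT_BUSY_ASSERT_SKETCH(m)				\
	KASSERT(vm_page_busied(m),					\
	    ("page %p: valid/dirty update without the busy lock", (m)))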
--- 58 unchanged lines hidden ---

#endif
	int psind, rv;

	MPASS(fs->vp == NULL);
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
-	    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL) {
+	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0

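vm_page_all_valid(m) is the accessor that this hunk and the ones below substitute for the open-coded comparison with VM_PAGE_BITS_ALL. Its definition is outside the scope of this diff; assuming it is the obvious inline wrapper (an assumption, not shown here), the two forms are equivalent:

/* Sketch under that assumption; the real helper is declared elsewhere. */
static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}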
--- 66 unchanged lines hidden ---

{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
-	MPASS(m->valid == VM_PAGE_BITS_ALL);
+	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;

--- 445 unchanged lines hidden ---

			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon. If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */
-			if (fs.m->valid != VM_PAGE_BITS_ALL)
+			if (!vm_page_all_valid(fs.m))
				goto readrest;
			break; /* break to PAGE HAS BEEN FOUND */
		}
		KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

		/*
		 * Page is not resident. If the pager might contain the page
		 * or this is the beginning of the search, allocate a new

--- 307 unchanged lines hidden ---

			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				VM_CNT_INC(v_ozfod);
			}
			VM_CNT_INC(v_zfod);
-			fs.m->valid = VM_PAGE_BITS_ALL;
+			vm_page_valid(fs.m);
			/* Don't try to prefault neighboring pages. */
			faultcount = 1;
			break; /* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_WLOCK(next_object);
			vm_object_pip_add(next_object, 1);

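vm_page_valid(fs.m) replaces the direct store fs.m->valid = VM_PAGE_BITS_ALL here and in the copy-on-write hunk below. The setter's body is not shown by this diff; a minimal sketch of the assumed behavior (the page is already busied, then every valid bit is set) is:

/*
 * Sketch only; vm_page_valid_sketch and the exact assertion used are
 * assumptions.  The real setter may use atomics when the page is only
 * share-busied rather than exclusively busied.
 */
static inline void
vm_page_valid_sketch(vm_page_t m)
{

	vm_page_assert_busied(m);	/* assumed precondition */
	m->valid = VM_PAGE_BITS_ALL;	/* mark the whole page valid */
}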
--- 74 unchanged lines hidden ---

				fs.first_m = fs.m;
				fs.m = NULL;
				VM_CNT_INC(v_cow_optim);
			} else {
				/*
				 * Oh, well, lets copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
-				fs.first_m->valid = VM_PAGE_BITS_ALL;
+				vm_page_valid(fs.first_m);
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
					vm_page_wire(fs.first_m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
				}
				/*
				 * We no longer need the old page or object.
				 */

--- 102 unchanged lines hidden ---

	vm_page_assert_xbusied(fs.m);
	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true);

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space. vm_pager_get_pages() ensures this.
	 */
-	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
+	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_WUNLOCK(fs.object);

	/*
	 * Put this page into the physical map. We had to do the unlock above
	 * because pmap_enter() may sleep. We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).

--- 99 unchanged lines hidden ---

		pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
		pstart = OFF_TO_IDX(entry->offset) + atop(start -
		    entry->start);
		m_next = vm_page_find_least(first_object, pstart);
		pend = OFF_TO_IDX(entry->offset) + atop(end -
		    entry->start);
		while ((m = m_next) != NULL && m->pindex < pend) {
			m_next = TAILQ_NEXT(m, listq);
-			if (m->valid != VM_PAGE_BITS_ALL ||
+			if (!vm_page_all_valid(m) ||
			    vm_page_busied(m))
				continue;

			/*
			 * Don't clear PGA_REFERENCED, since it would
			 * likely represent a reference by a different
			 * process.
			 *

--- 80 unchanged lines hidden ---

			VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
-		if (m->valid == VM_PAGE_BITS_ALL &&
+		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*

--- 258 unchanged lines hidden ---

		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object. In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
-		if (dst_m->valid == VM_PAGE_BITS_ALL) {
+		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

--- 42 unchanged lines hidden ---