vm_fault.c (5050aa86cff105784877fb886a7b1d25bca5813b) | vm_fault.c (bc79b37f2c911b81f09f9faebd1ce07e23519806) |
---|---|
1/*- 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * --- 1273 unchanged lines hidden (view full) --- 1282 * the constituent small page mappings are modified. Marking 1283 * PTEs as modified on inception allows promotion to happen 1284 * without taking potentially large number of soft faults. 1285 */ 1286 if (!upgrade) 1287 access &= ~VM_PROT_WRITE; 1288 1289 /* | 1/*- 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * --- 1273 unchanged lines hidden (view full) --- 1282 * the constituent small page mappings are modified. Marking 1283 * PTEs as modified on inception allows promotion to happen 1284 * without taking potentially large number of soft faults. 1285 */ 1286 if (!upgrade) 1287 access &= ~VM_PROT_WRITE; 1288 1289 /* |
1290 * Loop through all of the pages in the entry's range, copying each 1291 * one from the source object (it should be there) to the destination 1292 * object. | 1290 * Loop through all of the pages in the entry's range, copying 1291 * each one from the source object (it should be there) to the 1292 * destination object. Note that copied pages are not wired; 1293 * they are marked dirty to prevent reclamation without first 1294 * saving their content into the swap file on pageout. |
1293 */ 1294 for (vaddr = dst_entry->start, dst_pindex = 0; 1295 vaddr < dst_entry->end; 1296 vaddr += PAGE_SIZE, dst_pindex++) { 1297 1298 /* 1299 * Allocate a page in the destination object. 1300 */ --- 26 unchanged lines hidden (view full) --- 1327 VM_OBJECT_UNLOCK(object); 1328 object = backing_object; 1329 } 1330 if (src_m == NULL) 1331 panic("vm_fault_copy_wired: page missing"); 1332 pmap_copy_page(src_m, dst_m); 1333 VM_OBJECT_UNLOCK(object); 1334 dst_m->valid = VM_PAGE_BITS_ALL; | 1295 */ 1296 for (vaddr = dst_entry->start, dst_pindex = 0; 1297 vaddr < dst_entry->end; 1298 vaddr += PAGE_SIZE, dst_pindex++) { 1299 1300 /* 1301 * Allocate a page in the destination object. 1302 */ --- 26 unchanged lines hidden (view full) --- 1329 VM_OBJECT_UNLOCK(object); 1330 object = backing_object; 1331 } 1332 if (src_m == NULL) 1333 panic("vm_fault_copy_wired: page missing"); 1334 pmap_copy_page(src_m, dst_m); 1335 VM_OBJECT_UNLOCK(object); 1336 dst_m->valid = VM_PAGE_BITS_ALL; |
1337 dst_m->dirty = VM_PAGE_BITS_ALL; |
|
1335 VM_OBJECT_UNLOCK(dst_object); 1336 1337 /* 1338 * Enter it in the pmap. If a wired, copy-on-write 1339 * mapping is being replaced by a write-enabled 1340 * mapping, then wire that new mapping. 1341 */ 1342 pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade); --- 174 unchanged lines hidden --- | 1338 VM_OBJECT_UNLOCK(dst_object); 1339 1340 /* 1341 * Enter it in the pmap. If a wired, copy-on-write 1342 * mapping is being replaced by a write-enabled 1343 * mapping, then wire that new mapping. 1344 */ 1345 pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade); --- 174 unchanged lines hidden --- |