vm_fault.c: diff of revision eebf3286a6cf53a1cbb77626a5b2d9d17555a2ac (old) against revision 86735996620428e22541098fec8d308481237ca3 (new), shown as a unified diff. Lines prefixed "-" exist only in the old revision, lines prefixed "+" only in the new one, and "@@" notes give the line ranges in each revision.
@@ old 1-8, new 1-8 @@
 /*
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California. All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  *

--- 307 unchanged lines hidden ---

@@ old 316-386, new 316-382 @@
 			return (KERN_PROTECTION_FAILURE);
 		}

 		/*
 		 * See if page is resident
 		 */
 		fs.m = vm_page_lookup(fs.object, fs.pindex);
 		if (fs.m != NULL) {
-			int queue, s;
+			int queue;

 			/*
 			 * check for page-based copy on write.
 			 * We check fs.object == fs.first_object so
 			 * as to ensure the legacy COW mechanism is
 			 * used when the page in question is part of
 			 * a shadow object. Otherwise, vm_page_cowfault()
 			 * removes the page from the backing object,
 			 * which is not what we want.
 			 */
 			vm_page_lock_queues();
 			if ((fs.m->cow) &&
 			    (fault_type & VM_PROT_WRITE) &&
 			    (fs.object == fs.first_object)) {
-				s = splvm();
 				vm_page_cowfault(fs.m);
-				splx(s);
 				vm_page_unlock_queues();
 				unlock_and_deallocate(&fs);
 				goto RetryFault;
 			}

 			/*
 			 * Wait/Retry if the page is busy. We have to do this
 			 * if the page is busy via either PG_BUSY or

--- 18 unchanged lines hidden ---

 				vm_page_unlock_queues();
 				cnt.v_intrans++;
 				mtx_unlock(&Giant);
 				vm_object_deallocate(fs.first_object);
 				goto RetryFault;
 			}
 			queue = fs.m->queue;

-			s = splvm();
 			vm_pageq_remove_nowakeup(fs.m);
-			splx(s);

 			if ((queue - fs.m->pc) == PQ_CACHE && vm_page_count_severe()) {
 				vm_page_activate(fs.m);
 				vm_page_unlock_queues();
 				unlock_and_deallocate(&fs);
 				VM_WAITPFAULT;
 				goto RetryFault;
 			}
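
The removals in this hunk (and the matching ones in the second hunk below) follow one pattern: the new revision deletes the splvm()/splx() bracketing, together with the now-unused local variable s, around vm_page_cowfault(), vm_pageq_remove_nowakeup(), and vm_pager_page_unswapped(). The fragment below restates the deleted idiom with comments. It is an illustration only, not code from either revision, and the rationale suggested in the comments (the spl interrupt-priority calls having been superseded by the mutexes the surrounding code already holds, namely Giant and the page queues lock) is an inference from the context shown here, not something the diff itself states.

	/* Old idiom, as deleted above (illustrative fragment only). */
	int s;

	s = splvm();			/* raise the interrupt priority level for the VM subsystem */
	vm_pageq_remove_nowakeup(fs.m);	/* the queue manipulation being protected */
	splx(s);			/* restore the previously saved priority level */

	/*
	 * New idiom: call the routine directly. In the hunk above, Giant is
	 * held and the page queues lock was taken by vm_page_lock_queues()
	 * earlier in the same block, so the new revision simply drops the
	 * spl bracketing.
	 */
	vm_pageq_remove_nowakeup(fs.m);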

--- 436 unchanged lines hidden ---

@@ old 823-897, new 819-888 @@
 		 * been copied while we left the map unlocked. Changing from
 		 * read to write permission is OK - we leave the page
 		 * write-protected, and catch the write fault. Changing from
 		 * write to read permission means that we can't mark the page
 		 * write-enabled after all.
 		 */
 		prot &= retry_prot;
 	}
-
-	/*
-	 * Put this page into the physical map. We had to do the unlock above
-	 * because pmap_enter may cause other faults. We don't put the page
-	 * back on the active queue until later so that the page-out daemon
-	 * won't find us (yet).
-	 */
-
 	if (prot & VM_PROT_WRITE) {
 		vm_page_lock_queues();
 		vm_page_flag_set(fs.m, PG_WRITEABLE);
 		vm_object_set_writeable_dirty(fs.m->object);

 		/*
 		 * If the fault is a write, we know that this page is being
 		 * written NOW so dirty it explicitly to save on

--- 12 unchanged lines hidden ---

 		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
 			if (fs.m->dirty == 0)
 				vm_page_flag_set(fs.m, PG_NOSYNC);
 		} else {
 			vm_page_flag_clear(fs.m, PG_NOSYNC);
 		}
 		vm_page_unlock_queues();
 		if (fault_flags & VM_FAULT_DIRTY) {
-			int s;
 			vm_page_dirty(fs.m);
-			s = splvm();
 			vm_pager_page_unswapped(fs.m);
-			splx(s);
 		}
 	}

 	/*
 	 * Page had better still be busy
 	 */
 	KASSERT(fs.m->flags & PG_BUSY,
 	    ("vm_fault: page %p not busy!", fs.m));
 	/*
 	 * Sanity check: page must be completely valid or it is not fit to
 	 * map into user space. vm_pager_get_pages() ensures this.
 	 */
 	if (fs.m->valid != VM_PAGE_BITS_ALL) {
 		vm_page_zero_invalid(fs.m, TRUE);
 		printf("Warning: page %p partially invalid on fault\n", fs.m);
 	}
 	VM_OBJECT_UNLOCK(fs.object);

+	/*
+	 * Put this page into the physical map. We had to do the unlock above
+	 * because pmap_enter() may sleep. We don't put the page
+	 * back on the active queue until later so that the pageout daemon
+	 * won't find it (yet).
+	 */
 	pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
 	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	}
 	VM_OBJECT_LOCK(fs.object);
 	vm_page_lock_queues();
 	vm_page_flag_set(fs.m, PG_REFERENCED);

--- 463 unchanged lines hidden ---
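
For orientation, both hunks sit inside vm_fault(), the machine-independent page-fault handler that the platform trap code calls when a thread faults on an address. The sketch below shows the general shape of such a call in this era of the tree. The wrapper name handle_page_fault() and its argument handling are hypothetical; vm_fault(), trunc_page(), VM_PROT_READ, VM_PROT_WRITE, and VM_FAULT_NORMAL are existing kernel interfaces, but their use here is my reconstruction rather than code from either revision.

	/*
	 * Hypothetical caller sketch: resolve a fault on address "va" in
	 * "map". A real trap handler also derives the fault type from the
	 * trap frame and checks the result against KERN_SUCCESS before
	 * letting the faulting thread continue.
	 */
	static int
	handle_page_fault(vm_map_t map, vm_offset_t va, int is_write)
	{
		vm_prot_t ftype = is_write ? VM_PROT_WRITE : VM_PROT_READ;

		/* VM_FAULT_NORMAL: no wiring change and no forced dirtying. */
		return (vm_fault(map, trunc_page(va), ftype, VM_FAULT_NORMAL));
	}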