--- vm_fault.c (945f418ab8a676a9675bf5b845514cf63dbc652a)
+++ vm_fault.c (eb00b276ab2a5a549620ec3fe92c22fd0ddf948e)
@@ -1,8 +1,8 @@
 /*-
  * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  *
--- 150 unchanged lines hidden ---
@@ -159,19 +159,17 @@
 unlock_and_deallocate(struct faultstate *fs)
 {
 
 	vm_object_pip_wakeup(fs->object);
 	VM_OBJECT_UNLOCK(fs->object);
 	if (fs->object != fs->first_object) {
 		VM_OBJECT_LOCK(fs->first_object);
 		vm_page_lock(fs->first_m);
-		vm_page_lock_queues();
 		vm_page_free(fs->first_m);
-		vm_page_unlock_queues();
 		vm_page_unlock(fs->first_m);
 		vm_object_pip_wakeup(fs->first_object);
 		VM_OBJECT_UNLOCK(fs->first_object);
 		fs->first_m = NULL;
 	}
 	vm_object_deallocate(fs->first_object);
 	unlock_map(fs);
 	if (fs->vp != NULL) {
--- 165 unchanged lines hidden ---
@@ -343,19 +341,17 @@
 		 */
 		vm_page_flag_set(fs.m, PG_REFERENCED);
 		vm_page_unlock_queues();
 		vm_page_unlock(fs.m);
 		VM_OBJECT_UNLOCK(fs.object);
 		if (fs.object != fs.first_object) {
 			VM_OBJECT_LOCK(fs.first_object);
 			vm_page_lock(fs.first_m);
-			vm_page_lock_queues();
 			vm_page_free(fs.first_m);
-			vm_page_unlock_queues();
 			vm_page_unlock(fs.first_m);
 			vm_object_pip_wakeup(fs.first_object);
 			VM_OBJECT_UNLOCK(fs.first_object);
 			fs.first_m = NULL;
 		}
 		unlock_map(&fs);
 		VM_OBJECT_LOCK(fs.object);
 		if (fs.m == vm_page_lookup(fs.object,
--- 271 unchanged lines hidden ---
@@ -633,29 +629,25 @@
 			/*
 			 * XXX - the check for kernel_map is a kludge to work
 			 * around having the machine panic on a kernel space
 			 * fault w/ I/O error.
 			 */
 			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
 			    (rv == VM_PAGER_BAD)) {
 				vm_page_lock(fs.m);
-				vm_page_lock_queues();
 				vm_page_free(fs.m);
-				vm_page_unlock_queues();
 				vm_page_unlock(fs.m);
 				fs.m = NULL;
 				unlock_and_deallocate(&fs);
 				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
 			}
 			if (fs.object != fs.first_object) {
 				vm_page_lock(fs.m);
-				vm_page_lock_queues();
 				vm_page_free(fs.m);
-				vm_page_unlock_queues();
 				vm_page_unlock(fs.m);
 				fs.m = NULL;
 				/*
 				 * XXX - we cannot just fall out at this
 				 * point, m has been freed and is invalid!
 				 */
 			}
 		}
--- 97 unchanged lines hidden ---
@@ -759,23 +751,21 @@
 			 */
 			    ((fs.object->type == OBJT_DEFAULT) ||
 			    (fs.object->type == OBJT_SWAP)) &&
 			    (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
 			    /*
 			     * We don't chase down the shadow chain
 			     */
 			    fs.object == fs.first_object->backing_object) {
-				vm_page_lock(fs.first_m);
-				vm_page_lock_queues();
 				/*
 				 * get rid of the unnecessary page
 				 */
+				vm_page_lock(fs.first_m);
 				vm_page_free(fs.first_m);
-				vm_page_unlock_queues();
 				vm_page_unlock(fs.first_m);
 				/*
 				 * grab the page and put it into the
 				 * process'es object. The page is
 				 * automatically made dirty.
 				 */
 				vm_page_lock(fs.m);
 				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
--- 640 unchanged lines hidden ---
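
Every hunk above makes the same change: the page-queues lock round-trip (`vm_page_lock_queues()`/`vm_page_unlock_queues()`) is dropped from around `vm_page_free()`, leaving only the per-page lock. The final hunk additionally tightens the per-page lock's scope, acquiring it immediately before the free rather than ahead of the comment. Below is a minimal sketch of the call-site pattern the diff converges on, assuming (as these sites do) that the caller already holds the page's object lock; the helper name `free_page_locked()` is hypothetical and not part of vm_fault.c:

```c
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper illustrating the post-change discipline: only the
 * per-page lock brackets vm_page_free(); the global page-queues lock is
 * no longer acquired at the call site.  The caller must hold the owning
 * object's lock and must not touch *m afterward -- note how the call
 * sites in the diff immediately NULL their reference ("fs.m = NULL").
 */
static void
free_page_locked(vm_page_t m)
{

	vm_page_lock(m);	/* per-page lock only; no queues lock */
	vm_page_free(m);
	vm_page_unlock(m);
}
```

Unlocking after the free works here presumably because the per-page lock of this era is a pool mutex keyed by the page rather than storage reclaimed with it; in any case the lock/free/unlock ordering is taken verbatim from the diff. The net effect at each site is one fewer acquire/release of a global lock on the page-fault path, which appears to be the motivation for the change.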