vm_fault.c: diff between revisions 4e27d36d38f4c3b12bcc1855c5d41527d08d1ce0 (old) and a36f55322c71692896ccd8891ff852fc0dd87697 (new)
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 160 unchanged lines hidden ---

169 vm_object_deallocate(fs->first_object);
170 unlock_map(fs);
171 if (fs->vp != NULL) {
172 vput(fs->vp);
173 fs->vp = NULL;
174 }
175 }
176
177 static void
178 vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
179     vm_prot_t fault_type, int fault_flags, boolean_t set_wd)
180 {
181 boolean_t need_dirty;
182
183 if (((prot & VM_PROT_WRITE) == 0 &&
184 (fault_flags & VM_FAULT_DIRTY) == 0) ||
185 (m->oflags & VPO_UNMANAGED) != 0)
186 return;
187
188 VM_OBJECT_ASSERT_LOCKED(m->object);
189
190 need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
191 (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) ||
192 (fault_flags & VM_FAULT_DIRTY) != 0;
193
194 if (set_wd)
195 vm_object_set_writeable_dirty(m->object);
196 else
197 /*
198	 * If two callers of vm_fault_dirty() with set_wd ==
199	 * FALSE race, one for a map entry with the
200	 * MAP_ENTRY_NOSYNC flag set and the other with the
201	 * flag clear, it is possible for the no-NOSYNC thread
202	 * to see m->dirty != 0 and yet not clear VPO_NOSYNC.
203	 * Take the vm_page lock around the manipulation of
204	 * VPO_NOSYNC and the vm_page_dirty() call to avoid
205	 * the race and keep m->oflags consistent.
206 */
207 vm_page_lock(m);
208
209 /*
210 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
211 * if the page is already dirty to prevent data written with
212 * the expectation of being synced from not being synced.
213 * Likewise if this entry does not request NOSYNC then make
214 * sure the page isn't marked NOSYNC. Applications sharing
215 * data should use the same flags to avoid ping ponging.
216 */
217 if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
218 if (m->dirty == 0) {
219 m->oflags |= VPO_NOSYNC;
220 }
221 } else {
222 m->oflags &= ~VPO_NOSYNC;
223 }
224
225 /*
226 * If the fault is a write, we know that this page is being
227 * written NOW so dirty it explicitly to save on
228 * pmap_is_modified() calls later.
229 *
230 * Also tell the backing pager, if any, that it should remove
231 * any swap backing since the page is now dirty.
232 */
233 if (need_dirty)
234 vm_page_dirty(m);
235 if (!set_wd)
236 vm_page_unlock(m);
237 if (need_dirty)
238 vm_pager_page_unswapped(m);
239 }
240
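The set_wd argument encodes how the caller holds the object lock. Below is a minimal editorial sketch of the two call patterns, not part of the diff itself; the wrapper names are hypothetical, while struct faultstate, the lock assertion macros, and the vm_fault_dirty() signature are taken from the surrounding code.

static void
fault_dirty_read_locked(struct faultstate *fs, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags)
{

	/*
	 * The object may be only read-locked, as in the fast path
	 * below, which drops the lock with VM_OBJECT_RUNLOCK() right
	 * after the call.  vm_fault_dirty() then serializes the
	 * VPO_NOSYNC and vm_page_dirty() updates with the page lock:
	 * pass set_wd == FALSE.
	 */
	VM_OBJECT_ASSERT_LOCKED(m->object);
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, FALSE);
}

static void
fault_dirty_write_locked(struct faultstate *fs, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags)
{

	/*
	 * The object is write-locked, as in the slow path further
	 * down, so vm_fault_dirty() may call
	 * vm_object_set_writeable_dirty() and skip the page lock:
	 * pass set_wd == TRUE.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, TRUE);
}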
177 /*
178 * TRYPAGER - used by vm_fault to calculate whether the pager for the
179 * current object *might* contain the page.
180 *
181 * default objects are zero-fill, there is no real pager.
182 */
183 #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \
184 ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
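For readers unfamiliar with the idiom, the same predicate can be expressed as an inline helper. This is an editorial sketch rather than part of the diff: the helper name is hypothetical, and explicit parameters stand in for the fs.object, fault_flags and wired names that the macro captures from the enclosing vm_fault() scope.

static inline boolean_t
fault_object_might_page(vm_object_t object, int fault_flags, boolean_t wired)
{

	/*
	 * Default objects are zero-fill, so there is no real pager to
	 * consult.  Otherwise the pager might contain the page, but it
	 * is only worth asking when this is not a wiring-change fault,
	 * or when the mapping is wired.
	 */
	return (object->type != OBJT_DEFAULT &&
	    ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired));
}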

--- 131 unchanged lines hidden ---

316 if (result != KERN_SUCCESS)
317 goto fast_failed;
318 if (m_hold != NULL) {
319 *m_hold = m;
320 vm_page_lock(m);
321 vm_page_hold(m);
322 vm_page_unlock(m);
323 }
324 if ((fault_type & VM_PROT_WRITE) != 0 &&
325 (m->oflags & VPO_UNMANAGED) == 0) {
326 vm_page_dirty(m);
327 vm_pager_page_unswapped(m);
328 }
388 vm_fault_dirty(fs.entry, m, prot, fault_type, fault_flags,
389 FALSE);
329 VM_OBJECT_RUNLOCK(fs.first_object);
330 if (!wired)
331 vm_fault_prefault(&fs, vaddr, 0, 0);
332 vm_map_lookup_done(fs.map, fs.entry);
333 curthread->td_ru.ru_minflt++;
334 return (KERN_SUCCESS);
335 fast_failed:
336 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {

--- 556 unchanged lines hidden ---

893 * sets are the same.
894 *
895 * XXX The following assignment modifies the map
896 * without holding a write lock on it.
897 */
898 if (hardfault)
899 fs.entry->next_read = fs.pindex + faultcount - reqpage;
900
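	/*
	 * Editorial illustration, not part of the diff, with
	 * hypothetical numbers for the assignment above: if a hard
	 * fault at fs.pindex == 10 was serviced by a cluster of
	 * faultcount == 8 pages in which the requested page sat at
	 * index reqpage == 3, the cluster covered pindices 7 through
	 * 14, so next_read becomes 10 + 8 - 3 == 15, the first pindex
	 * past the cluster.  A subsequent fault at pindex 15 can then
	 * be recognized as sequential and extend the read-ahead.
	 */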
901 if (((prot & VM_PROT_WRITE) != 0 ||
902 (fault_flags & VM_FAULT_DIRTY) != 0) &&
903 (fs.m->oflags & VPO_UNMANAGED) == 0) {
904 vm_object_set_writeable_dirty(fs.object);
905
906 /*
907 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
908 * if the page is already dirty to prevent data written with
909 * the expectation of being synced from not being synced.
910 * Likewise if this entry does not request NOSYNC then make
911 * sure the page isn't marked NOSYNC. Applications sharing
912 * data should use the same flags to avoid ping ponging.
913 */
914 if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
915 if (fs.m->dirty == 0)
916 fs.m->oflags |= VPO_NOSYNC;
917 } else {
918 fs.m->oflags &= ~VPO_NOSYNC;
919 }
920
921 /*
922 * If the fault is a write, we know that this page is being
923 * written NOW so dirty it explicitly to save on
924 * pmap_is_modified() calls later.
925 *
926 * Also tell the backing pager, if any, that it should remove
927 * any swap backing since the page is now dirty.
928 */
929 if (((fault_type & VM_PROT_WRITE) != 0 &&
930 (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) ||
931 (fault_flags & VM_FAULT_DIRTY) != 0) {
932 vm_page_dirty(fs.m);
933 vm_pager_page_unswapped(fs.m);
934 }
935 }
936
962 vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, TRUE);
937 vm_page_assert_xbusied(fs.m);
938
939 /*
940 * Page must be completely valid or it is not fit to
941 * map into user space. vm_pager_get_pages() ensures this.
942 */
943 KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
944 ("vm_fault: page %p partially invalid", fs.m));

--- 605 unchanged lines hidden ---