/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
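 *
 * The externally visible entry points are vm_fault_trap(), invoked from the
 * machine-dependent trap handlers, and vm_fault() itself, which resolves a
 * fault against the chain of VM objects backing the faulting map entry.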
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int map_generation;
	bool lookup_still_valid;
	struct vnode *vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
release_page(struct faultstate *fs)
{

	if (fs->m != NULL) {
		vm_page_xunbusy(fs->m);
		vm_page_lock(fs->m);
		vm_page_deactivate(fs->m);
		vm_page_unlock(fs->m);
		fs->m = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_WUNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags, bool set_wd)
{
	bool need_dirty;

	if (((prot & VM_PROT_WRITE) == 0 &&
	    (fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fault_flags & VM_FAULT_DIRTY) != 0;

	if (set_wd)
		vm_object_set_writeable_dirty(m->object);
	else
		/*
		 * If two callers of vm_fault_dirty() with set_wd ==
		 * FALSE race, one for a map entry with the
		 * MAP_ENTRY_NOSYNC flag set and the other with the
		 * flag clear, it is possible for the no-NOSYNC thread
		 * to see m->dirty != 0 and not clear PGA_NOSYNC.  Take
		 * the vm_page lock around the manipulation of
		 * PGA_NOSYNC and the vm_page_dirty() call to avoid the
		 * race.
		 */
		vm_page_lock(m);

	/*
	 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
	 * if the page is already dirty to prevent data written with
	 * the expectation of being synced from not being synced.
	 * Likewise if this entry does not request NOSYNC then make
	 * sure the page isn't marked NOSYNC.  Applications sharing
	 * data should use the same flags to avoid ping-ponging.
	 */
	if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
		if (m->dirty == 0) {
			vm_page_aflag_set(m, PGA_NOSYNC);
		}
	} else {
		vm_page_aflag_clear(m, PGA_NOSYNC);
	}

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.  Calling
	 * the pager requires a write lock on the object.
	 */
	if (need_dirty)
		vm_page_dirty(m);
	if (!set_wd)
		vm_page_unlock(m);
	else if (need_dirty)
		vm_pager_page_unswapped(m);
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m, m_map;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;

	MPASS(fs->vp == NULL);
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (m_hold != NULL) {
		*m_hold = m;
		vm_page_wire(m);
	}
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
	if (psind == 0 && !wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_lock(m);
		vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
    int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	struct mtx *m_mtx;
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fault_type, fs->entry->max_protection, &pager_first, &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	if (fs->map->timestamp != fs->map_generation) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    pager_last);
		return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs->entry, &m[i], prot, fault_type,
			    fault_flags, true);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
		    (wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], prot, fault_type |
				    (wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		m_mtx = NULL;
		for (i = 0; i < npages; i++) {
			if ((fault_flags & VM_FAULT_WIRE) != 0) {
				vm_page_wire(&m[i]);
			} else {
				vm_page_change_lock(&m[i], &m_mtx);
				vm_page_activate(&m[i]);
			}
			if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
				*m_hold = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
		if (m_mtx != NULL)
			mtx_unlock(m_mtx);
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	release_page(fs);
	unlock_and_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	struct domainset *dset;
	vm_object_t next_object, retry_object;
	vm_offset_t e_end, e_start;
	vm_pindex_t retry_pindex;
	vm_prot_t prot, retry_prot;
	int ahead, alloc_req, behind, cluster_offset, era, faultcount;
	int nera, oom, result, rv;
	u_char behavior;
	boolean_t wired;	/* Passed by reference. */
	bool dead, hardfault, is_first_object_locked;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	oom = 0;
RetryFault_oom:

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object,
	    &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(&fs);
		return (result);
	}

	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)vaddr);
	}

	if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs.entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs.map);
		vm_map_lock(fs.map);
		if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
		    (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(&fs);
			fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs.map, 0);
		} else
			vm_map_unlock(fs.map);
		goto RetryFault;
	}

	MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);
	else
		KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
		    ("!wired && VM_FAULT_WIRE"));

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are both (1) mapping an existing page from the top-
	 * level object and (2) not having to mark that object as containing
	 * dirty pages.  Under these conditions, a read lock on the top-level
	 * object suffices, allowing multiple page faults of a similar type to
	 * run in parallel on the same top-level object.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
	    /* avoid calling vm_object_set_writeable_dirty() */
	    ((prot & VM_PROT_WRITE) == 0 ||
	    (fs.first_object->type != OBJT_VNODE &&
	    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
		VM_OBJECT_RLOCK(fs.first_object);
		if ((prot & VM_PROT_WRITE) == 0 ||
		    (fs.first_object->type != OBJT_VNODE &&
		    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
			rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
			    fault_flags, wired, m_hold);
			if (rv == KERN_SUCCESS)
				return (rv);
		}
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = true;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is either exclusive or shared busy
			 * because the vm_pager may be using read busy for
			 * pageouts (and even pageins if it is the vnode
			 * pager), and we could end up trying to pagein and
			 * pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a shared busied page except, perhaps,
			 * to pmap it.
			 */
			if (vm_page_tryxbusy(fs.m) == 0) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(fs.m, PGA_REFERENCED);
				if (fs.object != fs.first_object) {
					if (!VM_OBJECT_TRYWLOCK(
					    fs.first_object)) {
						VM_OBJECT_WUNLOCK(fs.object);
						VM_OBJECT_WLOCK(fs.first_object);
						VM_OBJECT_WLOCK(fs.object);
					}
					vm_page_free(fs.first_m);
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_WUNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);
				VM_CNT_INC(v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break out (we
			 * found the page).
			 */
			if (!vm_page_all_valid(fs.m))
				goto readrest;
			break; /* break to PAGE HAS BEEN FOUND */
		}
		KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object) {
			if ((fs.object->flags & OBJ_SIZEVNLOCK) != 0) {
				rv = vm_fault_lock_vnode(&fs);
				MPASS(rv == KERN_SUCCESS ||
				    rv == KERN_RESOURCE_SHORTAGE);
				if (rv == KERN_RESOURCE_SHORTAGE)
					goto RetryFault;
			}
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

			if (fs.object == fs.first_object &&
			    (fs.first_object->flags & OBJ_POPULATE) != 0 &&
			    fs.first_object->shadow_count == 0) {
				rv = vm_fault_populate(&fs, prot, fault_type,
				    fault_flags, wired, m_hold);
				switch (rv) {
				case KERN_SUCCESS:
				case KERN_FAILURE:
					unlock_and_deallocate(&fs);
					return (rv);
				case KERN_RESOURCE_SHORTAGE:
					unlock_and_deallocate(&fs);
					goto RetryFault;
				case KERN_NOT_RECEIVER:
					/*
					 * Pager's populate() method
					 * returned VM_PAGER_BAD.
					 */
					break;
				default:
					panic("inconsistent return codes");
				}
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * Unlocked read of the p_flag is harmless.  At
			 * worst, the P_KILLED might be not observed
			 * there, and allocation can fail, causing
			 * restart and new reading of the p_flag.
			 */
			dset = fs.object->domain.dr_policy;
			if (dset == NULL)
				dset = curthread->td_domain.dr_policy;
			if (!vm_page_count_severe_set(&dset->ds_mask) ||
			    P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
				vm_object_color(fs.object, atop(vaddr) -
				    fs.pindex);
#endif
				alloc_req = P_KILLED(curproc) ?
				    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
				if (fs.object->type != OBJT_VNODE &&
				    fs.object->backing_object == NULL)
					alloc_req |= VM_ALLOC_ZERO;
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    alloc_req);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				if (vm_pfault_oom_attempts < 0 ||
				    oom < vm_pfault_oom_attempts) {
					oom++;
					vm_waitpfault(dset,
					    vm_pfault_oom_wait * hz);
					goto RetryFault_oom;
				}
				if (bootverbose)
					printf(
	"proc %d (%s) failed to alloc page on fault, starting OOM\n",
					    curproc->p_pid, curproc->p_comm);
				vm_pageout_oom(VM_OOM_MEM_PF);
				goto RetryFault;
			}
		}

readrest:
		/*
		 * At this point, we have either allocated a new page or found
		 * an existing page that is only partially valid.
		 *
		 * We hold a reference on the current object and the page is
		 * exclusive busied.
		 */

		/*
		 * If the pager for the current object might have the page,
		 * then determine the number of additional pages to read and
		 * potentially reprioritize previously read pages for earlier
		 * reclamation.  These operations should only be performed
		 * once per page fault.  Even if the current pager doesn't
		 * have the page, the number of additional pages to read will
		 * apply to subsequent objects in the shadow chain.
		 */
		if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
		    !P_KILLED(curproc)) {
			KASSERT(fs.lookup_still_valid, ("map unlocked"));
			era = fs.entry->read_ahead;
			behavior = vm_map_entry_behavior(fs.entry);
			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				nera = 0;
			} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
				nera = VM_FAULT_READ_AHEAD_MAX;
				if (vaddr == fs.entry->next_read)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else if (vaddr == fs.entry->next_read) {
				/*
				 * This is a sequential fault.  Arithmetically
				 * increase the requested number of pages in
				 * the read-ahead window.  The requested
				 * number of pages is "# of sequential faults
				 * x (read ahead min + 1) + read ahead min"
				 */
				nera = VM_FAULT_READ_AHEAD_MIN;
				if (era > 0) {
					nera += era + 1;
					if (nera > VM_FAULT_READ_AHEAD_MAX)
						nera = VM_FAULT_READ_AHEAD_MAX;
				}
				if (era == VM_FAULT_READ_AHEAD_MAX)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else {
				/*
				 * This is a non-sequential fault.
				 */
				nera = 0;
			}
			if (era != nera) {
				/*
				 * A read lock on the map suffices to update
				 * the read ahead count safely.
				 */
				fs.entry->read_ahead = nera;
			}

			/*
			 * Prepare for unlocking the map.  Save the map
			 * entry's start and end addresses, which are used to
			 * optimize the size of the pager operation below.
			 * Even if the map entry's addresses change after
			 * unlocking the map, using the saved addresses is
			 * safe.
			 */
			e_start = fs.entry->start;
			e_end = fs.entry->end;
		}

		/*
		 * Call the pager to retrieve the page if there is a chance
		 * that the pager has it, and potentially retrieve additional
		 * pages at the same time.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * Release the map lock before locking the vnode or
			 * sleeping in the pager.  (If the current object has
			 * a shadow, then an earlier iteration of this loop
			 * may have already unlocked the map.)
			 */
			unlock_map(&fs);

			rv = vm_fault_lock_vnode(&fs);
			MPASS(rv == KERN_SUCCESS ||
			    rv == KERN_RESOURCE_SHORTAGE);
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));

			/*
			 * Page in the requested page and hint the pager
			 * that it may bring up surrounding pages.
			 */
			if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
			    P_KILLED(curproc)) {
				behind = 0;
				ahead = 0;
			} else {
				/* Is this a sequential fault? */
				if (nera > 0) {
					behind = 0;
					ahead = nera;
				} else {
					/*
					 * Request a cluster of pages that is
					 * aligned to a VM_FAULT_READ_DEFAULT
					 * page offset boundary within the
					 * object.  Alignment to a page offset
					 * boundary is more likely to coincide
					 * with the underlying file system
					 * block than alignment to a virtual
					 * address boundary.
					 */
					cluster_offset = fs.pindex %
					    VM_FAULT_READ_DEFAULT;
					behind = ulmin(cluster_offset,
					    atop(vaddr - e_start));
					ahead = VM_FAULT_READ_DEFAULT - 1 -
					    cluster_offset;
				}
				ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
			}
			rv = vm_pager_get_pages(fs.object, &fs.m, 1,
			    &behind, &ahead);
			if (rv == VM_PAGER_OK) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);

			/*
			 * If an I/O error occurred or the requested page was
			 * outside the range of the pager, clean up and return
			 * an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				if (!vm_page_wired(fs.m))
					vm_page_free(fs.m);
				else
					vm_page_xunbusy(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

			/*
			 * The requested page does not exist at this object/
			 * offset.  Remove the invalid page from the object,
			 * waking up anyone waiting for it, and continue on to
			 * the next object.  However, if this is the top-level
			 * object, we must leave the busy page in place to
			 * prevent another process from rushing past us, and
			 * inserting the page in that object at the same time
			 * that we are.
			 */
			if (fs.object != fs.first_object) {
				if (!vm_page_wired(fs.m))
					vm_page_free(fs.m);
				else
					vm_page_xunbusy(fs.m);
				fs.m = NULL;
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_WLOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				VM_CNT_INC(v_ozfod);
			}
			VM_CNT_INC(v_zfod);
			vm_page_valid(fs.m);
			/* Don't try to prefault neighboring pages. */
			faultcount = 1;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_WLOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			fs.pindex +=
			    OFF_TO_IDX(fs.object->backing_object_offset);
			VM_OBJECT_WUNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	vm_page_assert_xbusied(fs.m);

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = false;
			if (
			    /*
			     * Only one shadow object
			     */
			    (fs.object->shadow_count == 1) &&
			    /*
			     * No COW refs, except us
			     */
			    (fs.object->ref_count == 1) &&
			    /*
			     * No one else can look this object up
			     */
			    (fs.object->handle == NULL) &&
			    /*
			     * No other ways to look the object up
			     */
			    ((fs.object->type == OBJT_DEFAULT) ||
			    (fs.object->type == OBJT_SWAP)) &&
			    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
			    /*
			     * We don't chase down the shadow chain
			     */
			    fs.object == fs.first_object->backing_object) {

				(void)vm_page_remove(fs.m);
				vm_page_replace_checked(fs.m, fs.first_object,
				    fs.first_pindex, fs.first_m);
				vm_page_free(fs.first_m);
				vm_page_dirty(fs.m);
#if VM_NRESERVLEVEL > 0
				/*
				 * Rename the reservation.
				 */
				vm_reserv_rename(fs.m, fs.first_object,
				    fs.object, OFF_TO_IDX(
				    fs.first_object->backing_object_offset));
#endif
				fs.first_m = fs.m;
				fs.m = NULL;
				VM_CNT_INC(v_cow_optim);
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				vm_page_valid(fs.first_m);
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
					vm_page_wire(fs.first_m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_WLOCK(fs.object);
			VM_CNT_INC(v_cow_faults);
			curthread->td_cow++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = true;
		if (fs.map->timestamp != fs.map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr,
			    fault_type, &fs.entry, &retry_object,
			    &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the
			 * inactive list (the easiest thing to do here).  If
			 * no one needs it, pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked
				 * then retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the
			 * object has been copied while we left the map
			 * unlocked.  Changing from read to write permission
			 * is OK - we leave the page write-protected, and
			 * catch the write fault.  Changing from write to
			 * read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
			fault_type &= retry_prot;
			if (prot == 0) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/* Reassert because wired may have changed. */
			KASSERT(wired || (fault_flags & VM_FAULT_WIRE) == 0,
			    ("!wired && VM_FAULT_WIRE"));
		}
	}

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	vm_page_assert_xbusied(fs.m);
	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true);

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_WUNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
	    wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);
	VM_OBJECT_WLOCK(fs.object);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fault_flags & VM_FAULT_WIRE) != 0) {
		vm_page_wire(fs.m);
	} else {
		vm_page_lock(fs.m);
		vm_page_activate(fs.m);
		vm_page_unlock(fs.m);
	}
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);

	/*
	 * Unlock everything, and return.
	 */
	unlock_and_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	first_object = fs->first_object;
	if (first_object != object) {
		if (!VM_OBJECT_TRYWLOCK(first_object)) {
			VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WLOCK(first_object);
			VM_OBJECT_WLOCK(object);
		}
	}
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.
				 */
				vm_page_lock(m);
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
				vm_page_unlock(m);
			}
		}
	}
	if (first_object != object)
		VM_OBJECT_WUNLOCK(first_object);
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	/*
	 * Check for illegal addresses.
	 */
	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers already mark
		 * too wide a code area with vm_fault_disable_pagefaults();
		 * use the VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create new shadow object backing dst_entry with private copy of
 *		all underlying pages.  When src_entry is equal to dst_entry,
 *		function implements COW for wired-down map entry.  Otherwise,
 *		it forks wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * (Doesn't actually shadow anything - we copy the pages
		 * directly.)
		 */
		dst_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  Index can occur
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}