/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */
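/*
 * Informal overview (a reader's sketch, not normative): vm_fault_trap()
 * is the trap-handler entry point and translates Mach errors into
 * signals; vm_fault() does the work of looking up the map entry,
 * walking the shadow chain, calling the pager, and handling
 * copy-on-write; the remaining functions are helpers for readahead,
 * prefaulting, page holding, and wired-entry copying.
 */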

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESTART);
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	if (fs->map->timestamp != fs->map_generation) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    pager_last);
		return (KERN_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0),
		    psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL &&
			    m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
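/*
 * For example (an informal sketch): a write to a read-only mapping
 * comes back from vm_fault() as KERN_PROTECTION_FAILURE; the
 * translation below delivers SIGSEGV with SEGV_ACCERR to FreeBSD
 * binaries new enough to expect it and falls back to the historical
 * SIGBUS otherwise, subject to the machdep.prot_fault_translation
 * sysctl (0 = autodetect, 1 = always SIGBUS, 2 = always SIGSEGV).
 */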
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
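/*
 * As an illustration (assuming the stock VM_FAULT_READ_AHEAD_MIN of 7):
 * the first sequential fault requests 7 read-ahead pages, and each
 * subsequent sequential fault requests era + 8, so the window grows
 * 7, 15, 23, ... until it is clamped at VM_FAULT_READ_AHEAD_MAX.
 */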
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;
	}
	/*
	 * fs->object != fs->first_object due to above
	 * conditional
	 */
	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static int
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	int alloc_req;
	int rv;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		rv = vm_fault_lock_vnode(fs, true);
		MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
		if (rv == KERN_RESOURCE_SHORTAGE)
			return (rv);
	}

	if (fs->pindex >= fs->object->size)
		return (KERN_OUT_OF_BOUNDS);

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		rv = vm_fault_populate(fs);
		switch (rv) {
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_RESTART:
			return (rv);
		case KERN_NOT_RECEIVER:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * Unlocked read of the p_flag is harmless.  At worst, the P_KILLED
	 * might not be observed there, and allocation can fail, causing
	 * a restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		alloc_req = P_KILLED(curproc) ?
		    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
		if (fs->object->type != OBJT_VNODE &&
		    fs->object->backing_object == NULL)
			alloc_req |= VM_ALLOC_ZERO;
		fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req);
	}
	if (fs->m == NULL) {
		unlock_and_deallocate(fs);
		if (vm_pfault_oom_attempts < 0 ||
		    fs->oom < vm_pfault_oom_attempts) {
			fs->oom++;
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		} else {
			if (bootverbose)
				printf(
	"proc %d (%s) failed to alloc page on fault, starting OOM\n",
				    curproc->p_pid, curproc->p_comm);
			vm_pageout_oom(VM_OOM_MEM_PF);
			fs->oom = 0;
		}
		return (KERN_RESOURCE_SHORTAGE);
	}
	fs->oom = 0;

	return (KERN_NOT_RECEIVER);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static int
vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	rv = vm_fault_lock_vnode(fs, false);
	MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
	if (rv == KERN_RESOURCE_SHORTAGE)
		return (rv);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring in surrounding pages.
	 */
	if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (nera > 0) {
			behind = 0;
			ahead = nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
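			/*
			 * For instance (assuming the stock
			 * VM_FAULT_READ_AHEAD_INIT of 15, so that
			 * VM_FAULT_READ_DEFAULT is 16): a fault at
			 * pindex 37 yields cluster_offset 5, requesting
			 * up to 5 pages behind and 10 pages ahead, which
			 * covers the aligned block of pages [32, 47].
			 */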
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (KERN_SUCCESS);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
		return (KERN_OUT_OF_BOUNDS);
	return (KERN_NOT_RECEIVER);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m == vm_page_lookup(fs->object, fs->pindex))
		vm_page_busy_sleep(fs->m, "vmpfw", false);
	else
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount;
	int nera, result, rv;
	bool dead, hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom = 0;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	result = vm_fault_lookup(&fs);
	if (result != KERN_SUCCESS) {
		if (result == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (result);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
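	/*
	 * For instance, two threads read-faulting on different resident
	 * pages of the same file-backed object can both take the fast
	 * path below while holding only the shared object lock.
	 */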
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		rv = vm_fault_soft_fast(&fs);
		if (rv == KERN_SUCCESS)
			return (rv);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			if (vm_page_tryxbusy(fs.m) == 0) {
				vm_fault_busy_sleep(&fs);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it still is completely valid we
			 * are done.
			 */
			if (vm_page_all_valid(fs.m)) {
				VM_OBJECT_WUNLOCK(fs.object);
				break; /* break to PAGE HAS BEEN FOUND. */
			}
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.object);

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object)) {
			rv = vm_fault_allocate(&fs);
			switch (rv) {
			case KERN_RESTART:
				unlock_and_deallocate(&fs);
				/* FALLTHROUGH */
			case KERN_RESOURCE_SHORTAGE:
				goto RetryFault;
			case KERN_SUCCESS:
			case KERN_FAILURE:
			case KERN_OUT_OF_BOUNDS:
				unlock_and_deallocate(&fs);
				return (rv);
			case KERN_NOT_RECEIVER:
				break;
			default:
				panic("vm_fault: Unhandled rv %d", rv);
			}
		}

		/*
		 * Default objects have no pager so no exclusive busy exists
		 * to protect this page in the chain.  Skip to the next
		 * object without dropping the lock to preserve atomicity of
		 * shadow faults.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * At this point, we have either allocated a new page
			 * or found an existing page that is only partially
			 * valid.
			 *
			 * We hold a reference on the current object and the
			 * page is exclusive busied.
			 * The exclusive busy prevents simultaneous faults
			 * and collapses while the object lock is dropped.
			 */
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * If the pager for the current object might have
			 * the page, then determine the number of additional
			 * pages to read and potentially reprioritize
			 * previously read pages for earlier reclamation.
			 * These operations should only be performed once per
			 * page fault.  Even if the current pager doesn't
			 * have the page, the number of additional pages to
			 * read will apply to subsequent objects in the
			 * shadow chain.
			 */
			if (nera == -1 && !P_KILLED(curproc))
				nera = vm_fault_readahead(&fs);

			rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
			if (rv == KERN_SUCCESS) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND. */
			}
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			VM_OBJECT_WLOCK(fs.object);
			if (rv == KERN_OUT_OF_BOUNDS) {
				fault_page_free(&fs.m);
				unlock_and_deallocate(&fs);
				return (rv);
			}
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break;	/* break to PAGE HAS BEEN FOUND. */
	}

	/*
	 * PAGE HAS BEEN FOUND.  A valid page has been found and exclusively
	 * busied.  The object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;
		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		result = vm_fault_relookup(&fs);
		if (result != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (result == KERN_RESTART)
				goto RetryFault;
			return (result);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_UNLOCKED(object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
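	/*
	 * A sketch of the arithmetic (assuming 4 KB pages and no superpage
	 * reservations): "size" below is VM_FAULT_DONTNEED_MIN, i.e., 1 MiB
	 * or 256 pages, so each time the faulting address nears the end of
	 * a 1 MiB boundary, the preceding aligned 1 MiB worth of pages is
	 * advised away.
	 */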
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
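		/* Even i steps backward from addra; odd i steps forward. */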
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().
		 * Some out-of-tree callers already mark too wide a code
		 * region with vm_fault_disable_pagefaults(); use the
		 * VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create new shadow object backing dst_entry with private copy of
 *		all underlying pages.  When src_entry is equal to dst_entry,
 *		function implements COW for wired-down map entry.  Otherwise,
 *		it forks wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  Index can occur
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
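/*
 * Usage sketch for the pair above (a hypothetical caller, not part of
 * this file):
 *
 *	int save, error;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(uaddr, kaddr, len);
 *	vm_fault_enable_pagefaults(save);
 *
 * While TDP_NOFAULTING is set, vm_fault() fails with
 * KERN_PROTECTION_FAILURE instead of paging in, so the copy routine
 * returns EFAULT rather than sleeping on pager I/O.
 */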