/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576
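
/*
 * Illustrative note: VM_FAULT_READ_DEFAULT is the faulting page plus the
 * initial read-ahead window, so a non-sequential hard fault requests at
 * most that many pages from the pager in a single call, while PFBAK and
 * PFFOR bound vm_fault_prefault() to four pages on either side of the
 * faulting address.
 */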

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int map_generation;
	bool lookup_still_valid;
	struct vnode *vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_lock(m);
		vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags)
{
	bool need_dirty;

	if (((prot & VM_PROT_WRITE) == 0 &&
	    (fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping-ponging.
		 */
		if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
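/*
 * An illustrative reading of the superpage test below (2MB is an assumed
 * example size, not a requirement): the fault is promoted to a single
 * superpage mapping only when the reservation's pages are all valid, the
 * map entry covers the entire 2MB span, the mapping is not wired, and the
 * faulting virtual address has the same offset within its 2MB-aligned
 * window as the page's physical address has within its own, i.e.
 *
 *	(vaddr & (2MB - 1)) == (VM_PAGE_TO_PHYS(m) & (2MB - 1))
 */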
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m, m_map;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;

	MPASS(fs->vp == NULL);
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (m_hold != NULL) {
		*m_hold = m;
		vm_page_wire(m);
	}
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags);
	if (psind == 0 && !wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_lock(m);
		vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
    int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	struct mtx *m_mtx;
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fault_type, fs->entry->max_protection, &pager_first, &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	if (fs->map->timestamp != fs->map_generation) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    pager_last);
		return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
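	 *
	 * For example (illustrative numbers only): if the entry maps
	 * pindexes [16, 31] and the pager populated [8, 47], the code
	 * below deactivates [8, 15] and [32, 47] and clips the range
	 * to [16, 31] before any mappings are entered.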
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs->entry, &m[i], prot, fault_type,
			    fault_flags);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
		    (wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], prot, fault_type |
				    (wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		m_mtx = NULL;
		for (i = 0; i < npages; i++) {
			if ((fault_flags & VM_FAULT_WIRE) != 0) {
				vm_page_wire(&m[i]);
			} else {
				vm_page_change_lock(&m[i], &m_mtx);
				vm_page_activate(&m[i]);
			}
			if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
				*m_hold = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
		if (m_mtx != NULL)
			mtx_unlock(m_mtx);
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
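 *
 *	An illustrative caller (a hypothetical sketch, not an actual MD
 *	trap handler) would translate a failing fault into a signal:
 *
 *		int signo, ucode;
 *
 *		if (vm_fault_trap(map, trunc_page(va), VM_PROT_WRITE, 0,
 *		    &signo, &ucode) != KERN_SUCCESS)
 *			(deliver signo/ucode, e.g. via trapsignal())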
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	unlock_and_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m == vm_page_lookup(fs->object, fs->pindex))
		vm_page_sleep_if_busy(fs->m, "vmpfw");
	VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	struct domainset *dset;
	vm_object_t next_object, retry_object;
	vm_offset_t e_end, e_start;
	vm_pindex_t retry_pindex;
	vm_prot_t prot, retry_prot;
	int ahead, alloc_req, behind, cluster_offset, era, faultcount;
	int nera, oom, result, rv;
	u_char behavior;
	boolean_t wired;	/* Passed by reference. */
	bool dead, hardfault, is_first_object_locked;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	oom = 0;
RetryFault_oom:

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object,
	    &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(&fs);
		return (result);
	}

	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)vaddr);
	}

	if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs.entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs.map);
		vm_map_lock(fs.map);
		if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
		    (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(&fs);
			fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs.map, 0);
		} else
			vm_map_unlock(fs.map);
		goto RetryFault;
	}

	MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);
	else
		KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
		    ("!wired && VM_FAULT_WIRE"));

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
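	 * For example, two threads taking read faults on different
	 * resident pages of the same object can both be handled by
	 * vm_fault_soft_fast() without serializing on the object's
	 * write lock.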
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
		    fault_flags, wired, m_hold);
		if (rv == KERN_SUCCESS)
			return (rv);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = true;

	fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			if (vm_page_tryxbusy(fs.m) == 0) {
				vm_fault_busy_sleep(&fs);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break out (we
			 * found the page).
			 */
			if (!vm_page_all_valid(fs.m))
				goto readrest;
			break; /* break to PAGE HAS BEEN FOUND */
		}
		KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object) {
			if ((fs.object->flags & OBJ_SIZEVNLOCK) != 0) {
				rv = vm_fault_lock_vnode(&fs);
				MPASS(rv == KERN_SUCCESS ||
				    rv == KERN_RESOURCE_SHORTAGE);
				if (rv == KERN_RESOURCE_SHORTAGE)
					goto RetryFault;
			}
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

			if (fs.object == fs.first_object &&
			    (fs.first_object->flags & OBJ_POPULATE) != 0 &&
			    fs.first_object->shadow_count == 0) {
				rv = vm_fault_populate(&fs, prot, fault_type,
				    fault_flags, wired, m_hold);
				switch (rv) {
				case KERN_SUCCESS:
				case KERN_FAILURE:
					unlock_and_deallocate(&fs);
					return (rv);
				case KERN_RESOURCE_SHORTAGE:
					unlock_and_deallocate(&fs);
					goto RetryFault;
				case KERN_NOT_RECEIVER:
					/*
					 * Pager's populate() method
					 * returned VM_PAGER_BAD.
					 */
					break;
				default:
					panic("inconsistent return codes");
				}
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * Unlocked read of the p_flag is harmless.  At
			 * worst, the P_KILLED might not be observed
			 * there, and allocation can fail, causing
			 * restart and new reading of the p_flag.
			 */
			dset = fs.object->domain.dr_policy;
			if (dset == NULL)
				dset = curthread->td_domain.dr_policy;
			if (!vm_page_count_severe_set(&dset->ds_mask) ||
			    P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
				vm_object_color(fs.object, atop(vaddr) -
				    fs.pindex);
#endif
				alloc_req = P_KILLED(curproc) ?
				    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
				if (fs.object->type != OBJT_VNODE &&
				    fs.object->backing_object == NULL)
					alloc_req |= VM_ALLOC_ZERO;
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    alloc_req);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				if (vm_pfault_oom_attempts < 0 ||
				    oom < vm_pfault_oom_attempts) {
					oom++;
					vm_waitpfault(dset,
					    vm_pfault_oom_wait * hz);
					goto RetryFault_oom;
				}
				if (bootverbose)
					printf(
	"proc %d (%s) failed to alloc page on fault, starting OOM\n",
					    curproc->p_pid, curproc->p_comm);
				vm_pageout_oom(VM_OOM_MEM_PF);
				goto RetryFault;
			}
		}

readrest:
		/*
		 * At this point, we have either allocated a new page or found
		 * an existing page that is only partially valid.
		 *
		 * We hold a reference on the current object and the page is
		 * exclusive busied.
		 */

		/*
		 * If the pager for the current object might have the page,
		 * then determine the number of additional pages to read and
		 * potentially reprioritize previously read pages for earlier
		 * reclamation.  These operations should only be performed
		 * once per page fault.  Even if the current pager doesn't
		 * have the page, the number of additional pages to read will
		 * apply to subsequent objects in the shadow chain.
		 */
		if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
		    !P_KILLED(curproc)) {
			KASSERT(fs.lookup_still_valid, ("map unlocked"));
			era = fs.entry->read_ahead;
			behavior = vm_map_entry_behavior(fs.entry);
			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				nera = 0;
			} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
				nera = VM_FAULT_READ_AHEAD_MAX;
				if (vaddr == fs.entry->next_read)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else if (vaddr == fs.entry->next_read) {
				/*
				 * This is a sequential fault.  Arithmetically
				 * increase the requested number of pages in
				 * the read-ahead window.  The requested
				 * number of pages is "# of sequential faults
				 * x (read ahead min + 1) + read ahead min"
				 */
				nera = VM_FAULT_READ_AHEAD_MIN;
				if (era > 0) {
					nera += era + 1;
					if (nera > VM_FAULT_READ_AHEAD_MAX)
						nera = VM_FAULT_READ_AHEAD_MAX;
				}
				if (era == VM_FAULT_READ_AHEAD_MAX)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else {
				/*
				 * This is a non-sequential fault.
				 */
				nera = 0;
			}
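			/*
			 * Illustrative growth of the window (assuming a
			 * read-ahead minimum of 7 pages, an example value
			 * rather than a guarantee): consecutive sequential
			 * faults request 7, 15, 23, ... pages of read-ahead
			 * until clamped to VM_FAULT_READ_AHEAD_MAX.
			 */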
			if (era != nera) {
				/*
				 * A read lock on the map suffices to update
				 * the read ahead count safely.
				 */
				fs.entry->read_ahead = nera;
			}

			/*
			 * Prepare for unlocking the map.  Save the map
			 * entry's start and end addresses, which are used to
			 * optimize the size of the pager operation below.
			 * Even if the map entry's addresses change after
			 * unlocking the map, using the saved addresses is
			 * safe.
			 */
			e_start = fs.entry->start;
			e_end = fs.entry->end;
		}

		/*
		 * Call the pager to retrieve the page if there is a chance
		 * that the pager has it, and potentially retrieve additional
		 * pages at the same time.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * Release the map lock before locking the vnode or
			 * sleeping in the pager.  (If the current object has
			 * a shadow, then an earlier iteration of this loop
			 * may have already unlocked the map.)
			 */
			unlock_map(&fs);

			rv = vm_fault_lock_vnode(&fs);
			MPASS(rv == KERN_SUCCESS ||
			    rv == KERN_RESOURCE_SHORTAGE);
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));

			/*
			 * Page in the requested page and hint to the pager
			 * that it may bring up surrounding pages.
			 */
			if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
			    P_KILLED(curproc)) {
				behind = 0;
				ahead = 0;
			} else {
				/* Is this a sequential fault? */
				if (nera > 0) {
					behind = 0;
					ahead = nera;
				} else {
					/*
					 * Request a cluster of pages that is
					 * aligned to a VM_FAULT_READ_DEFAULT
					 * page offset boundary within the
					 * object.  Alignment to a page offset
					 * boundary is more likely to coincide
					 * with the underlying file system
					 * block than alignment to a virtual
					 * address boundary.
					 */
					cluster_offset = fs.pindex %
					    VM_FAULT_READ_DEFAULT;
					behind = ulmin(cluster_offset,
					    atop(vaddr - e_start));
					ahead = VM_FAULT_READ_DEFAULT - 1 -
					    cluster_offset;
				}
				ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
			}
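			/*
			 * Worked example (illustrative values only): with
			 * VM_FAULT_READ_DEFAULT == 16 pages and
			 * fs.pindex == 21, cluster_offset is 5, so at most
			 * 5 pages are read behind and 10 ahead, keeping the
			 * 16-page cluster aligned within the object.
			 */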
			rv = vm_pager_get_pages(fs.object, &fs.m, 1,
			    &behind, &ahead);
			if (rv == VM_PAGER_OK) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);

			/*
			 * If an I/O error occurred or the requested page was
			 * outside the range of the pager, clean up and return
			 * an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				fault_page_free(&fs.m);
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

		}

		/*
		 * The requested page does not exist at this object/
		 * offset.  Remove the invalid page from the object,
		 * waking up anyone waiting for it, and continue on to
		 * the next object.  However, if this is the top-level
		 * object, we must leave the busy page in place to
		 * prevent another process from rushing past us, and
		 * inserting the page in that object at the same time
		 * that we are.
		 */
		if (fs.object == fs.first_object) {
			fs.first_m = fs.m;
			fs.m = NULL;
		} else
			fault_page_free(&fs.m);

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				VM_OBJECT_WLOCK(fs.object);
			}
			MPASS(fs.first_m != NULL);
			MPASS(fs.m == NULL);
			fs.m = fs.first_m;
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				VM_CNT_INC(v_ozfod);
			}
			VM_CNT_INC(v_zfod);
			vm_page_valid(fs.m);
			/* Don't try to prefault neighboring pages. */
			faultcount = 1;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			MPASS(fs.first_m != NULL);
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_WLOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			fs.pindex +=
			    OFF_TO_IDX(fs.object->backing_object_offset);
			VM_OBJECT_WUNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	vm_page_assert_xbusied(fs.m);

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = false;
			if (
			    /*
			     * Only one shadow object
			     */
			    (fs.object->shadow_count == 1) &&
			    /*
			     * No COW refs, except us
			     */
			    (fs.object->ref_count == 1) &&
			    /*
			     * No one else can look this object up
			     */
			    (fs.object->handle == NULL) &&
			    /*
			     * No other ways to look the object up
			     */
			    ((fs.object->flags & OBJ_ANON) != 0) &&
			    (is_first_object_locked =
			    VM_OBJECT_TRYWLOCK(fs.first_object)) &&
			    /*
			     * We don't chase down the shadow chain
			     */
			    fs.object == fs.first_object->backing_object) {

				/*
				 * Remove but keep xbusy for replace.  fs.m is
				 * moved into fs.first_object and left busy
				 * while fs.first_m is conditionally freed.
				 */
				vm_page_remove_xbusy(fs.m);
				vm_page_replace(fs.m, fs.first_object,
				    fs.first_pindex, fs.first_m);
				vm_page_dirty(fs.m);
#if VM_NRESERVLEVEL > 0
				/*
				 * Rename the reservation.
				 */
				vm_reserv_rename(fs.m, fs.first_object,
				    fs.object, OFF_TO_IDX(
				    fs.first_object->backing_object_offset));
#endif
				VM_OBJECT_WUNLOCK(fs.object);
				fs.first_m = fs.m;
				fs.m = NULL;
				VM_CNT_INC(v_cow_optim);
			} else {
				VM_OBJECT_WUNLOCK(fs.object);
				/*
				 * Oh, well, let's copy it.
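				 * pmap_copy_page() copies the backing
				 * page's contents into fs.first_m, the
				 * page already allocated in the first
				 * object.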
				 */
				pmap_copy_page(fs.m, fs.first_m);
				vm_page_valid(fs.first_m);
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
					vm_page_wire(fs.first_m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
				}
				/*
				 * We no longer need the old page or object.
				 */
				fault_page_release(&fs.m);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);

			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_WLOCK(fs.object);
			VM_CNT_INC(v_cow_faults);
			curthread->td_cow++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		if (!vm_map_trylock_read(fs.map)) {
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = true;
		if (fs.map->timestamp != fs.map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr,
			    fault_type, &fs.entry, &retry_object,
			    &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the
			 * inactive list (the easiest thing to do here).  If
			 * no one needs it, pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked
				 * then retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the
			 * object has been copied while we left the map
			 * unlocked.  Changing from read to write permission
			 * is OK - we leave the page write-protected, and
			 * catch the write fault.  Changing from write to
			 * read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
			fault_type &= retry_prot;
			if (prot == 0) {
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/* Reassert because wired may have changed. */
			KASSERT(wired || (fault_flags & VM_FAULT_WIRE) == 0,
			    ("!wired && VM_FAULT_WIRE"));
		}
	}

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	vm_page_assert_xbusied(fs.m);
	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags);

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_WUNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
	    wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fault_flags & VM_FAULT_WIRE) != 0) {
		vm_page_wire(fs.m);
	} else {
		vm_page_lock(fs.m);
		vm_page_activate(fs.m);
		vm_page_unlock(fs.m);
	}
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return.
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * of madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
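/*
 * Illustrative numbers (assumptions, not requirements): with 4KB pages
 * and a 2MB cluster size, a sequential read that crosses a 2MB boundary
 * causes the preceding 2MB of mappings to be advised MADV_DONTNEED and
 * the corresponding resident pages of the first object, if any, to be
 * deactivated.
 */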
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	first_object = fs->first_object;
	if (first_object != object) {
		if (!VM_OBJECT_TRYWLOCK(first_object)) {
			VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WLOCK(first_object);
			VM_OBJECT_WLOCK(object);
		}
	}
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.
				 */
				vm_page_lock(m);
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
				vm_page_unlock(m);
			}
		}
	}
	if (first_object != object)
		VM_OBJECT_WUNLOCK(first_object);
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * page faults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
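/*
 * Illustrative use (a hypothetical caller; "uaddr" and "len" are assumed
 * local variables): hold the pages backing a small user buffer, then
 * release them with vm_page_unhold_pages():
 *
 *	vm_page_t ma[4];
 *	int n;
 *
 *	n = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    uaddr, len, VM_PROT_READ, ma, nitems(ma));
 *	if (n == -1)
 *		return (EFAULT);
 *	...use ma[0 .. n-1]...
 *	vm_page_unhold_pages(ma, n);
 */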
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	/*
	 * Check for illegal addresses.
	 */
	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers mark
		 * too wide a code area with vm_fault_disable_pagefaults()
		 * already; such callers should use the VM_PROT_QUICK_NOFAULT
		 * flag to request the proper behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}

/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new shadow object backing dst_entry with private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry,
 *	function implements COW for wired-down map entry.  Otherwise,
 *	it forks wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
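			 * If the allocation fails, sleep waiting for
			 * free pages and retry this virtual page from
			 * the "again" label.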
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can be
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}