/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
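/*
 * Editorial note (illustrative, not from the original source): the fast
 * path below tries to map an already-resident page, possibly as a
 * superpage, without taking the object write lock.  As a hedged, worked
 * example of the reservation alignment test it performs, assume 4KB base
 * pages and a 2MB superpage (psind 1, so pagesizes[1] == 0x200000):
 *
 *	vaddr = 0x40123000		faulting address
 *	vaddr & 0x1fffff = 0x123000	offset within the virtual 2MB page
 *	VM_PAGE_TO_PHYS(m) & 0x1fffff	offset within the physical 2MB run
 *
 * Only when the two offsets match, and the whole 2MB range fits inside
 * the map entry, can the superpage mapping be created.
 */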
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESTART);
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (KERN_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in the superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
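	/*
	 * Editorial note (illustrative, not from the original source): as
	 * a worked example of the clipping above, suppose the map entry
	 * covers object pindexes [16, 47] and the pager chose to populate
	 * [8, 63].  The two cleanup calls deactivate and unbusy pindexes
	 * [8, 15] and [48, 63], and the loop below then maps only the
	 * clipped range [16, 47].
	 */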
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \
    defined(__powerpc64__)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
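/*
 * Editorial note (illustrative, not from the original source): a
 * machine-dependent trap handler would call vm_fault_trap() roughly as
 * sketched below; the variable names and the signal-delivery step here
 * are hypothetical.
 *
 *	int sig, ucode;
 *
 *	if (vm_fault_trap(map, faulting_va, ftype, 0, &sig, &ucode) !=
 *	    KERN_SUCCESS) {
 *		// Deliver "sig" with si_code "ucode" to curthread.
 *	}
 */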
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
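/*
 * Editorial note (illustrative, not from the original source): a hedged,
 * worked example of the window growth computed below, assuming
 * VM_FAULT_READ_AHEAD_MIN is 7.  On the first sequential fault nera is
 * 7; on the next, 7 + 7 + 1 = 15; then 7 + 15 + 1 = 23; i.e. after n
 * further sequential faults nera = n * 8 + 7, matching the formula in
 * the comment below, until it is clamped at VM_FAULT_READ_AHEAD_MAX.
 */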
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is
		 * "# of sequential faults x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;
	}
	/*
	 * fs->object != fs->first_object due to above
	 * conditional
	 */
	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static int
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	int alloc_req;
	int rv;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		rv = vm_fault_lock_vnode(fs, true);
		MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
		if (rv == KERN_RESOURCE_SHORTAGE)
			return (rv);
	}

	if (fs->pindex >= fs->object->size)
		return (KERN_OUT_OF_BOUNDS);

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		rv = vm_fault_populate(fs);
		switch (rv) {
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_RESTART:
			return (rv);
		case KERN_NOT_RECEIVER:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * An unlocked read of the p_flag is harmless.  At worst, the
	 * P_KILLED flag might not be observed there, and allocation can
	 * fail, causing a restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		alloc_req = P_KILLED(curproc) ?
		    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
		if (fs->object->type != OBJT_VNODE &&
		    fs->object->backing_object == NULL)
			alloc_req |= VM_ALLOC_ZERO;
		fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req);
	}
	if (fs->m == NULL) {
		unlock_and_deallocate(fs);
		if (vm_pfault_oom_attempts < 0 ||
		    fs->oom < vm_pfault_oom_attempts) {
			fs->oom++;
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		} else {
			if (bootverbose)
				printf(
		"proc %d (%s) failed to alloc page on fault, starting OOM\n",
				    curproc->p_pid, curproc->p_comm);
			vm_pageout_oom(VM_OOM_MEM_PF);
			fs->oom = 0;
		}
		return (KERN_RESOURCE_SHORTAGE);
	}
	fs->oom = 0;

	return (KERN_NOT_RECEIVER);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
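/*
 * Editorial note (illustrative, not from the original source): a hedged,
 * worked example of the cluster alignment done below, assuming
 * VM_FAULT_READ_DEFAULT is 16 pages.  For a non-sequential fault on
 * pindex 37: cluster_offset = 37 % 16 = 5, so at most 5 pages behind and
 * 16 - 1 - 5 = 10 pages ahead are requested, keeping the cluster
 * [32, 47] aligned to a multiple-of-16 page offset within the object.
 * Both values are then clipped to the map entry's bounds.
 */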
static int
vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	rv = vm_fault_lock_vnode(fs, false);
	MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
	if (rv == KERN_RESOURCE_SHORTAGE)
		return (rv);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint to the pager that it may
	 * bring in surrounding pages.
	 */
	if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (nera > 0) {
			behind = 0;
			ahead = nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (KERN_SUCCESS);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
		return (KERN_OUT_OF_BOUNDS);
	return (KERN_NOT_RECEIVER);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m == vm_page_lookup(fs->object, fs->pindex))
		vm_page_busy_sleep(fs->m, "vmpfw", false);
	else
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount;
	int nera, result, rv;
	bool dead, hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom = 0;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	result = vm_fault_lookup(&fs);
	if (result != KERN_SUCCESS) {
		if (result == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (result);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		rv = vm_fault_soft_fast(&fs);
		if (rv == KERN_SUCCESS)
			return (rv);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		rv = vm_fault_allocate(&fs);
		switch (rv) {
		case KERN_RESTART:
			unlock_and_deallocate(&fs);
			/* FALLTHROUGH */
		case KERN_RESOURCE_SHORTAGE:
			goto RetryFault;
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_OUT_OF_BOUNDS:
			unlock_and_deallocate(&fs);
			return (rv);
		case KERN_NOT_RECEIVER:
			break;
		default:
			panic("vm_fault: Unhandled rv %d", rv);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			if (vm_page_tryxbusy(fs.m) == 0) {
				vm_fault_busy_sleep(&fs);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it is still completely valid we
			 * are done.
			 */
			if (vm_page_all_valid(fs.m)) {
				VM_OBJECT_WUNLOCK(fs.object);
				break; /* break to PAGE HAS BEEN FOUND. */
			}
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.object);

		/*
		 * The page is not resident.  If the pager might contain the
		 * page or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object)) {
			rv = vm_fault_allocate(&fs);
			switch (rv) {
			case KERN_RESTART:
				unlock_and_deallocate(&fs);
				/* FALLTHROUGH */
			case KERN_RESOURCE_SHORTAGE:
				goto RetryFault;
			case KERN_SUCCESS:
			case KERN_FAILURE:
			case KERN_OUT_OF_BOUNDS:
				unlock_and_deallocate(&fs);
				return (rv);
			case KERN_NOT_RECEIVER:
				break;
			default:
				panic("vm_fault: Unhandled rv %d", rv);
			}
		}

		/*
		 * Default objects have no pager so no exclusive busy exists
		 * to protect this page in the chain.  Skip to the next
		 * object without dropping the lock to preserve atomicity of
		 * shadow faults.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * At this point, we have either allocated a new page
			 * or found an existing page that is only partially
			 * valid.
			 *
			 * We hold a reference on the current object and the
			 * page is exclusive busied.  The exclusive busy
			 * prevents simultaneous faults and collapses while
			 * the object lock is dropped.
			 */
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * If the pager for the current object might have
			 * the page, then determine the number of additional
			 * pages to read and potentially reprioritize
			 * previously read pages for earlier reclamation.
			 * These operations should only be performed once per
			 * page fault.  Even if the current pager doesn't
			 * have the page, the number of additional pages to
			 * read will apply to subsequent objects in the
			 * shadow chain.
			 */
			if (nera == -1 && !P_KILLED(curproc))
				nera = vm_fault_readahead(&fs);

			rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
			if (rv == KERN_SUCCESS) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND. */
			}
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			VM_OBJECT_WLOCK(fs.object);
			if (rv == KERN_OUT_OF_BOUNDS) {
				fault_page_free(&fs.m);
				unlock_and_deallocate(&fs);
				return (rv);
			}
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				fault_page_free(&fs.first_m);
			unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break; /* break to PAGE HAS BEEN FOUND. */
	}

	/*
	 * PAGE HAS BEEN FOUND.  A valid page has been found and exclusively
	 * busied.  The object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		result = vm_fault_relookup(&fs);
		if (result != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (result == KERN_RESTART)
				goto RetryFault;
			return (result);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return.
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
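/*
 * Editorial note (illustrative, not from the original source): a hedged,
 * worked example of the cluster arithmetic below, assuming 4KB pages and
 * a 2MB smallest superpage, so that the cluster size is
 * max(VM_FAULT_DONTNEED_MIN, pagesizes[1]) = max(1MB, 2MB) = 2MB.  Once
 * a run of sequential reads crosses a 2MB-aligned virtual address, the
 * preceding 2MB of the mapping is passed to pmap_advise(MADV_DONTNEED)
 * and its resident pages are moved toward the inactive queue.
 */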
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_UNLOCKED(object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
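	/*
	 * Editorial note (illustrative, not from the original source): with
	 * backward = forward = 4 (PFBAK/PFFOR), the loop below visits up to
	 * eight candidates in this order:
	 *
	 *	i = 0: addra - PAGE_SIZE	i = 1: addra + PAGE_SIZE
	 *	i = 2: addra - 2 * PAGE_SIZE	i = 3: addra + 2 * PAGE_SIZE
	 *	...				i = 7: addra + 4 * PAGE_SIZE
	 *
	 * since the step is ((i >> 1) + 1) pages, negative for even i and
	 * positive for odd i.
	 */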
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
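/*
 * Editorial note (illustrative, not from the original source): a hedged
 * usage sketch; the buffer size, error handling, and access step here are
 * hypothetical.
 *
 *	vm_page_t ma[4];
 *	int n;
 *
 *	n = vm_fault_quick_hold_pages(map, uva, len, VM_PROT_READ, ma,
 *	    nitems(ma));
 *	if (n == -1)
 *		return (EFAULT);
 *	// ... access the held pages (e.g. through temporary mappings) ...
 *	vm_page_unhold_pages(ma, n);
 */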
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers already mark
		 * too wide a code area with vm_fault_disable_pagefaults();
		 * use the VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create a new shadow object backing dst_entry with a private
 *		copy of all underlying pages.  When src_entry is equal to
 *		dst_entry, the function implements COW for a wired-down map
 *		entry.  Otherwise, it forks a wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create a new shadow object backing dst_entry with a private
 *		copy of all underlying pages.  When src_entry is equal to
 *		dst_entry, the function implements COW for a wired-down map
 *		entry.  Otherwise, it forks a wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can be
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
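
/*
 * Call-pattern sketch (editor-added, illustrative; variable names are
 * hypothetical, cf. vm_map_protect() and vmspace_fork() in vm_map.c):
 * passing the same map and entry for both source and destination performs
 * the in-place COW "upgrade" of a wired entry, while distinct maps and
 * entries fork a wired entry into a child map.
 */
#if 0
	/* Upgrade: write-enable a wired copy-on-write entry in place. */
	vm_fault_copy_entry(map, map, entry, entry, NULL);

	/* Fork: copy a wired entry from the parent into the child map. */
	vm_fault_copy_entry(new_map, old_map, new_entry, old_entry,
	    &fork_charge);
#endif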
/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
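
/*
 * Usage sketch (editor-added, hypothetical fragment; "uaddr", "kbuf", and
 * "len" are assumed caller-supplied): the disable/enable pair brackets
 * code that must not sleep in the fault handler.  With TDP_NOFAULTING
 * set, a copyin() that would otherwise fault pages in fails with EFAULT
 * instead, letting the caller fall back to a slower path (cf. the
 * vn_io_fault() pattern in vfs_vnops.c).
 */
#if 0
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(uaddr, kbuf, len);	/* EFAULT instead of faulting */
	vm_fault_enable_pagefaults(save);
#endif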