/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */
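/*
 * Overview of this file: vm_fault_trap() translates fault results into
 * signals for the machine-dependent trap code, and vm_fault() resolves a
 * fault against a vm_map: it looks up the map entry, walks the shadow
 * object chain, pages data in through the pager with read-ahead, performs
 * copy-on-write and zero-fill, and finally enters the page into the pmap.
 * Helper routines provide a fast path for resident pages
 * (vm_fault_soft_fast()), pager populate() support, prefaulting of
 * neighboring pages, page holding for I/O (vm_fault_quick_hold_pages()),
 * and copying of wired map entries (vm_fault_copy_entry()).
 */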
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");
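/*
 * Example (illustrative values only): both knobs above are CTLFLAG_RWTUN,
 * so they can be set as loader tunables or changed at runtime with
 * sysctl(8):
 *
 *	# sysctl vm.pfault_oom_attempts=10
 *	# sysctl vm.pfault_oom_wait=5
 *
 * Setting vm.pfault_oom_attempts to -1 keeps the handler waiting for free
 * pages indefinitely instead of triggering OOM from the fault path (see
 * vm_fault_allocate()).
 */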
static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}

}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access.
*/ 313 if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 && 314 vm_page_busied(m)) || !vm_page_all_valid(m)) { 315 rv = KERN_FAILURE; 316 goto out; 317 } 318 m_map = m; 319 psind = 0; 320 #if VM_NRESERVLEVEL > 0 321 if ((m->flags & PG_FICTITIOUS) == 0 && 322 (m_super = vm_reserv_to_superpage(m)) != NULL && 323 rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start && 324 roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end && 325 (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) & 326 (pagesizes[m_super->psind] - 1)) && !fs->wired && 327 pmap_ps_enabled(fs->map->pmap)) { 328 flags = PS_ALL_VALID; 329 if ((fs->prot & VM_PROT_WRITE) != 0) { 330 /* 331 * Create a superpage mapping allowing write access 332 * only if none of the constituent pages are busy and 333 * all of them are already dirty (except possibly for 334 * the page that was faulted on). 335 */ 336 flags |= PS_NONE_BUSY; 337 if ((fs->first_object->flags & OBJ_UNMANAGED) == 0) 338 flags |= PS_ALL_DIRTY; 339 } 340 if (vm_page_ps_test(m_super, flags, m)) { 341 m_map = m_super; 342 psind = m_super->psind; 343 vaddr = rounddown2(vaddr, pagesizes[psind]); 344 /* Preset the modified bit for dirty superpages. */ 345 if ((flags & PS_ALL_DIRTY) != 0) 346 fs->fault_type |= VM_PROT_WRITE; 347 } 348 } 349 #endif 350 rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type | 351 PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 352 if (rv != KERN_SUCCESS) 353 goto out; 354 if (fs->m_hold != NULL) { 355 (*fs->m_hold) = m; 356 vm_page_wire(m); 357 } 358 if (psind == 0 && !fs->wired) 359 vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true); 360 VM_OBJECT_RUNLOCK(fs->first_object); 361 vm_fault_dirty(fs, m); 362 vm_map_lookup_done(fs->map, fs->entry); 363 curthread->td_ru.ru_minflt++; 364 365 out: 366 vm_object_unbusy(fs->first_object); 367 return (rv); 368 } 369 370 static void 371 vm_fault_restore_map_lock(struct faultstate *fs) 372 { 373 374 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 375 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 376 377 if (!vm_map_trylock_read(fs->map)) { 378 VM_OBJECT_WUNLOCK(fs->first_object); 379 vm_map_lock_read(fs->map); 380 VM_OBJECT_WLOCK(fs->first_object); 381 } 382 fs->lookup_still_valid = true; 383 } 384 385 static void 386 vm_fault_populate_check_page(vm_page_t m) 387 { 388 389 /* 390 * Check each page to ensure that the pager is obeying the 391 * interface: the page must be installed in the object, fully 392 * valid, and exclusively busied. 
393 */ 394 MPASS(m != NULL); 395 MPASS(vm_page_all_valid(m)); 396 MPASS(vm_page_xbusied(m)); 397 } 398 399 static void 400 vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first, 401 vm_pindex_t last) 402 { 403 vm_page_t m; 404 vm_pindex_t pidx; 405 406 VM_OBJECT_ASSERT_WLOCKED(object); 407 MPASS(first <= last); 408 for (pidx = first, m = vm_page_lookup(object, pidx); 409 pidx <= last; pidx++, m = vm_page_next(m)) { 410 vm_fault_populate_check_page(m); 411 vm_page_deactivate(m); 412 vm_page_xunbusy(m); 413 } 414 } 415 416 static int 417 vm_fault_populate(struct faultstate *fs) 418 { 419 vm_offset_t vaddr; 420 vm_page_t m; 421 vm_pindex_t map_first, map_last, pager_first, pager_last, pidx; 422 int bdry_idx, i, npages, psind, rv; 423 424 MPASS(fs->object == fs->first_object); 425 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 426 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 427 MPASS(fs->first_object->backing_object == NULL); 428 MPASS(fs->lookup_still_valid); 429 430 pager_first = OFF_TO_IDX(fs->entry->offset); 431 pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1; 432 unlock_map(fs); 433 unlock_vp(fs); 434 435 /* 436 * Call the pager (driver) populate() method. 437 * 438 * There is no guarantee that the method will be called again 439 * if the current fault is for read, and a future fault is 440 * for write. Report the entry's maximum allowed protection 441 * to the driver. 442 */ 443 rv = vm_pager_populate(fs->first_object, fs->first_pindex, 444 fs->fault_type, fs->entry->max_protection, &pager_first, 445 &pager_last); 446 447 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 448 if (rv == VM_PAGER_BAD) { 449 /* 450 * VM_PAGER_BAD is the backdoor for a pager to request 451 * normal fault handling. 452 */ 453 vm_fault_restore_map_lock(fs); 454 if (fs->map->timestamp != fs->map_generation) 455 return (KERN_RESTART); 456 return (KERN_NOT_RECEIVER); 457 } 458 if (rv != VM_PAGER_OK) 459 return (KERN_FAILURE); /* AKA SIGSEGV */ 460 461 /* Ensure that the driver is obeying the interface. */ 462 MPASS(pager_first <= pager_last); 463 MPASS(fs->first_pindex <= pager_last); 464 MPASS(fs->first_pindex >= pager_first); 465 MPASS(pager_last < fs->first_object->size); 466 467 vm_fault_restore_map_lock(fs); 468 bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> 469 MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 470 if (fs->map->timestamp != fs->map_generation) { 471 if (bdry_idx == 0) { 472 vm_fault_populate_cleanup(fs->first_object, pager_first, 473 pager_last); 474 } else { 475 m = vm_page_lookup(fs->first_object, pager_first); 476 if (m != fs->m) 477 vm_page_xunbusy(m); 478 } 479 return (KERN_RESTART); 480 } 481 482 /* 483 * The map is unchanged after our last unlock. Process the fault. 484 * 485 * First, the special case of largepage mappings, where 486 * populate only busies the first page in superpage run. 
487 */ 488 if (bdry_idx != 0) { 489 KASSERT(PMAP_HAS_LARGEPAGES, 490 ("missing pmap support for large pages")); 491 m = vm_page_lookup(fs->first_object, pager_first); 492 vm_fault_populate_check_page(m); 493 VM_OBJECT_WUNLOCK(fs->first_object); 494 vaddr = fs->entry->start + IDX_TO_OFF(pager_first) - 495 fs->entry->offset; 496 /* assert alignment for entry */ 497 KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0, 498 ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx", 499 (uintmax_t)fs->entry->start, (uintmax_t)pager_first, 500 (uintmax_t)fs->entry->offset, (uintmax_t)vaddr)); 501 KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0, 502 ("unaligned superpage m %p %#jx", m, 503 (uintmax_t)VM_PAGE_TO_PHYS(m))); 504 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, 505 fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) | 506 PMAP_ENTER_LARGEPAGE, bdry_idx); 507 VM_OBJECT_WLOCK(fs->first_object); 508 vm_page_xunbusy(m); 509 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) { 510 for (i = 0; i < atop(pagesizes[bdry_idx]); i++) 511 vm_page_wire(m + i); 512 } 513 if (fs->m_hold != NULL) { 514 *fs->m_hold = m + (fs->first_pindex - pager_first); 515 vm_page_wire(*fs->m_hold); 516 } 517 goto out; 518 } 519 520 /* 521 * The range [pager_first, pager_last] that is given to the 522 * pager is only a hint. The pager may populate any range 523 * within the object that includes the requested page index. 524 * In case the pager expanded the range, clip it to fit into 525 * the map entry. 526 */ 527 map_first = OFF_TO_IDX(fs->entry->offset); 528 if (map_first > pager_first) { 529 vm_fault_populate_cleanup(fs->first_object, pager_first, 530 map_first - 1); 531 pager_first = map_first; 532 } 533 map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1; 534 if (map_last < pager_last) { 535 vm_fault_populate_cleanup(fs->first_object, map_last + 1, 536 pager_last); 537 pager_last = map_last; 538 } 539 for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx); 540 pidx <= pager_last; 541 pidx += npages, m = vm_page_next(&m[npages - 1])) { 542 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset; 543 #if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \ 544 __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \ 545 defined(__powerpc64__) 546 psind = m->psind; 547 if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 || 548 pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last || 549 !pmap_ps_enabled(fs->map->pmap) || fs->wired)) 550 psind = 0; 551 #else 552 psind = 0; 553 #endif 554 npages = atop(pagesizes[psind]); 555 for (i = 0; i < npages; i++) { 556 vm_fault_populate_check_page(&m[i]); 557 vm_fault_dirty(fs, &m[i]); 558 } 559 VM_OBJECT_WUNLOCK(fs->first_object); 560 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | 561 (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 562 #if defined(__amd64__) 563 if (psind > 0 && rv == KERN_FAILURE) { 564 for (i = 0; i < npages; i++) { 565 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), 566 &m[i], fs->prot, fs->fault_type | 567 (fs->wired ? 
				    PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
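/*
 * Worked example of the arithmetic below, in terms of the
 * VM_FAULT_READ_AHEAD_MIN/MAX constants: on the first fault of a
 * sequential run the saved read_ahead ("era") is 0, so nera becomes MIN.
 * On the next sequential fault era == MIN, so nera = MIN + (MIN + 1);
 * the window then grows by MIN + 1 on each subsequent sequential fault
 * until it is clamped at MAX.  A non-sequential fault resets the window
 * to 0, and an entry marked MAP_ENTRY_BEHAV_SEQUENTIAL jumps straight
 * to MAX.
 */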
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
786 */ 787 fs->entry->read_ahead = nera; 788 } 789 790 return (nera); 791 } 792 793 static int 794 vm_fault_lookup(struct faultstate *fs) 795 { 796 int result; 797 798 KASSERT(!fs->lookup_still_valid, 799 ("vm_fault_lookup: Map already locked.")); 800 result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type | 801 VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object, 802 &fs->first_pindex, &fs->prot, &fs->wired); 803 if (result != KERN_SUCCESS) { 804 unlock_vp(fs); 805 return (result); 806 } 807 808 fs->map_generation = fs->map->timestamp; 809 810 if (fs->entry->eflags & MAP_ENTRY_NOFAULT) { 811 panic("%s: fault on nofault entry, addr: %#lx", 812 __func__, (u_long)fs->vaddr); 813 } 814 815 if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION && 816 fs->entry->wiring_thread != curthread) { 817 vm_map_unlock_read(fs->map); 818 vm_map_lock(fs->map); 819 if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) && 820 (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) { 821 unlock_vp(fs); 822 fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 823 vm_map_unlock_and_wait(fs->map, 0); 824 } else 825 vm_map_unlock(fs->map); 826 return (KERN_RESOURCE_SHORTAGE); 827 } 828 829 MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0); 830 831 if (fs->wired) 832 fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY); 833 else 834 KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0, 835 ("!fs->wired && VM_FAULT_WIRE")); 836 fs->lookup_still_valid = true; 837 838 return (KERN_SUCCESS); 839 } 840 841 static int 842 vm_fault_relookup(struct faultstate *fs) 843 { 844 vm_object_t retry_object; 845 vm_pindex_t retry_pindex; 846 vm_prot_t retry_prot; 847 int result; 848 849 if (!vm_map_trylock_read(fs->map)) 850 return (KERN_RESTART); 851 852 fs->lookup_still_valid = true; 853 if (fs->map->timestamp == fs->map_generation) 854 return (KERN_SUCCESS); 855 856 result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type, 857 &fs->entry, &retry_object, &retry_pindex, &retry_prot, 858 &fs->wired); 859 if (result != KERN_SUCCESS) { 860 /* 861 * If retry of map lookup would have blocked then 862 * retry fault from start. 863 */ 864 if (result == KERN_FAILURE) 865 return (KERN_RESTART); 866 return (result); 867 } 868 if (retry_object != fs->first_object || 869 retry_pindex != fs->first_pindex) 870 return (KERN_RESTART); 871 872 /* 873 * Check whether the protection has changed or the object has 874 * been copied while we left the map unlocked. Changing from 875 * read to write permission is OK - we leave the page 876 * write-protected, and catch the write fault. Changing from 877 * write to read permission means that we can't mark the page 878 * write-enabled after all. 879 */ 880 fs->prot &= retry_prot; 881 fs->fault_type &= retry_prot; 882 if (fs->prot == 0) 883 return (KERN_RESTART); 884 885 /* Reassert because wired may have changed. */ 886 KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0, 887 ("!wired && VM_FAULT_WIRE")); 888 889 return (KERN_SUCCESS); 890 } 891 892 static void 893 vm_fault_cow(struct faultstate *fs) 894 { 895 bool is_first_object_locked; 896 897 /* 898 * This allows pages to be virtually copied from a backing_object 899 * into the first_object, where the backing object has no other 900 * refs to it, and cannot gain any more refs. Instead of a bcopy, 901 * we just move the page from the backing object to the first 902 * object. Note that we must mark the page dirty in the first 903 * object so that it will go out to swap when needed. 
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;
	}
	/*
	 * fs->object != fs->first_object due to above
	 * conditional
	 */
	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
1026 */ 1027 if (fs->object != fs->first_object) { 1028 vm_object_pip_wakeup(fs->object); 1029 fs->object = fs->first_object; 1030 fs->pindex = fs->first_pindex; 1031 } 1032 MPASS(fs->first_m != NULL); 1033 MPASS(fs->m == NULL); 1034 fs->m = fs->first_m; 1035 fs->first_m = NULL; 1036 1037 /* 1038 * Zero the page if necessary and mark it valid. 1039 */ 1040 if ((fs->m->flags & PG_ZERO) == 0) { 1041 pmap_zero_page(fs->m); 1042 } else { 1043 VM_CNT_INC(v_ozfod); 1044 } 1045 VM_CNT_INC(v_zfod); 1046 vm_page_valid(fs->m); 1047 } 1048 1049 /* 1050 * Allocate a page directly or via the object populate method. 1051 */ 1052 static int 1053 vm_fault_allocate(struct faultstate *fs) 1054 { 1055 struct domainset *dset; 1056 int alloc_req; 1057 int rv; 1058 1059 if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) { 1060 rv = vm_fault_lock_vnode(fs, true); 1061 MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE); 1062 if (rv == KERN_RESOURCE_SHORTAGE) 1063 return (rv); 1064 } 1065 1066 if (fs->pindex >= fs->object->size) 1067 return (KERN_OUT_OF_BOUNDS); 1068 1069 if (fs->object == fs->first_object && 1070 (fs->first_object->flags & OBJ_POPULATE) != 0 && 1071 fs->first_object->shadow_count == 0) { 1072 rv = vm_fault_populate(fs); 1073 switch (rv) { 1074 case KERN_SUCCESS: 1075 case KERN_FAILURE: 1076 case KERN_RESTART: 1077 return (rv); 1078 case KERN_NOT_RECEIVER: 1079 /* 1080 * Pager's populate() method 1081 * returned VM_PAGER_BAD. 1082 */ 1083 break; 1084 default: 1085 panic("inconsistent return codes"); 1086 } 1087 } 1088 1089 /* 1090 * Allocate a new page for this object/offset pair. 1091 * 1092 * Unlocked read of the p_flag is harmless. At worst, the P_KILLED 1093 * might be not observed there, and allocation can fail, causing 1094 * restart and new reading of the p_flag. 1095 */ 1096 dset = fs->object->domain.dr_policy; 1097 if (dset == NULL) 1098 dset = curthread->td_domain.dr_policy; 1099 if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) { 1100 #if VM_NRESERVLEVEL > 0 1101 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex); 1102 #endif 1103 alloc_req = P_KILLED(curproc) ? 1104 VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL; 1105 if (fs->object->type != OBJT_VNODE && 1106 fs->object->backing_object == NULL) 1107 alloc_req |= VM_ALLOC_ZERO; 1108 fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req); 1109 } 1110 if (fs->m == NULL) { 1111 unlock_and_deallocate(fs); 1112 if (vm_pfault_oom_attempts < 0 || 1113 fs->oom < vm_pfault_oom_attempts) { 1114 fs->oom++; 1115 vm_waitpfault(dset, vm_pfault_oom_wait * hz); 1116 } else { 1117 if (bootverbose) 1118 printf( 1119 "proc %d (%s) failed to alloc page on fault, starting OOM\n", 1120 curproc->p_pid, curproc->p_comm); 1121 vm_pageout_oom(VM_OOM_MEM_PF); 1122 fs->oom = 0; 1123 } 1124 return (KERN_RESOURCE_SHORTAGE); 1125 } 1126 fs->oom = 0; 1127 1128 return (KERN_NOT_RECEIVER); 1129 } 1130 1131 /* 1132 * Call the pager to retrieve the page if there is a chance 1133 * that the pager has it, and potentially retrieve additional 1134 * pages at the same time. 1135 */ 1136 static int 1137 vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp) 1138 { 1139 vm_offset_t e_end, e_start; 1140 int ahead, behind, cluster_offset, rv; 1141 u_char behavior; 1142 1143 /* 1144 * Prepare for unlocking the map. Save the map 1145 * entry's start and end addresses, which are used to 1146 * optimize the size of the pager operation below. 
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	rv = vm_fault_lock_vnode(fs, false);
	MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
	if (rv == KERN_RESOURCE_SHORTAGE)
		return (rv);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring up surrounding pages.
	 */
	if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (nera > 0) {
			behind = 0;
			ahead = nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (KERN_SUCCESS);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
		return (KERN_OUT_OF_BOUNDS);
	return (KERN_NOT_RECEIVER);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
1239 */ 1240 vm_page_aflag_set(fs->m, PGA_REFERENCED); 1241 if (fs->object != fs->first_object) { 1242 fault_page_release(&fs->first_m); 1243 vm_object_pip_wakeup(fs->first_object); 1244 } 1245 vm_object_pip_wakeup(fs->object); 1246 unlock_map(fs); 1247 if (fs->m == vm_page_lookup(fs->object, fs->pindex)) 1248 vm_page_busy_sleep(fs->m, "vmpfw", false); 1249 else 1250 VM_OBJECT_WUNLOCK(fs->object); 1251 VM_CNT_INC(v_intrans); 1252 vm_object_deallocate(fs->first_object); 1253 } 1254 1255 int 1256 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1257 int fault_flags, vm_page_t *m_hold) 1258 { 1259 struct faultstate fs; 1260 int ahead, behind, faultcount; 1261 int nera, result, rv; 1262 bool dead, hardfault; 1263 1264 VM_CNT_INC(v_vm_faults); 1265 1266 if ((curthread->td_pflags & TDP_NOFAULTING) != 0) 1267 return (KERN_PROTECTION_FAILURE); 1268 1269 fs.vp = NULL; 1270 fs.vaddr = vaddr; 1271 fs.m_hold = m_hold; 1272 fs.fault_flags = fault_flags; 1273 fs.map = map; 1274 fs.lookup_still_valid = false; 1275 fs.oom = 0; 1276 faultcount = 0; 1277 nera = -1; 1278 hardfault = false; 1279 1280 RetryFault: 1281 fs.fault_type = fault_type; 1282 1283 /* 1284 * Find the backing store object and offset into it to begin the 1285 * search. 1286 */ 1287 result = vm_fault_lookup(&fs); 1288 if (result != KERN_SUCCESS) { 1289 if (result == KERN_RESOURCE_SHORTAGE) 1290 goto RetryFault; 1291 return (result); 1292 } 1293 1294 /* 1295 * Try to avoid lock contention on the top-level object through 1296 * special-case handling of some types of page faults, specifically, 1297 * those that are mapping an existing page from the top-level object. 1298 * Under this condition, a read lock on the object suffices, allowing 1299 * multiple page faults of a similar type to run in parallel. 1300 */ 1301 if (fs.vp == NULL /* avoid locked vnode leak */ && 1302 (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 && 1303 (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) { 1304 VM_OBJECT_RLOCK(fs.first_object); 1305 rv = vm_fault_soft_fast(&fs); 1306 if (rv == KERN_SUCCESS) 1307 return (rv); 1308 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { 1309 VM_OBJECT_RUNLOCK(fs.first_object); 1310 VM_OBJECT_WLOCK(fs.first_object); 1311 } 1312 } else { 1313 VM_OBJECT_WLOCK(fs.first_object); 1314 } 1315 1316 /* 1317 * Make a reference to this object to prevent its disposal while we 1318 * are messing with it. Once we have the reference, the map is free 1319 * to be diddled. Since objects reference their shadows (and copies), 1320 * they will stay around as well. 1321 * 1322 * Bump the paging-in-progress count to prevent size changes (e.g. 1323 * truncation operations) during I/O. 1324 */ 1325 vm_object_reference_locked(fs.first_object); 1326 vm_object_pip_add(fs.first_object, 1); 1327 1328 fs.m_cow = fs.m = fs.first_m = NULL; 1329 1330 /* 1331 * Search for the page at object/offset. 
1332 */ 1333 fs.object = fs.first_object; 1334 fs.pindex = fs.first_pindex; 1335 1336 if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) { 1337 rv = vm_fault_allocate(&fs); 1338 switch (rv) { 1339 case KERN_RESTART: 1340 unlock_and_deallocate(&fs); 1341 /* FALLTHROUGH */ 1342 case KERN_RESOURCE_SHORTAGE: 1343 goto RetryFault; 1344 case KERN_SUCCESS: 1345 case KERN_FAILURE: 1346 case KERN_OUT_OF_BOUNDS: 1347 unlock_and_deallocate(&fs); 1348 return (rv); 1349 case KERN_NOT_RECEIVER: 1350 break; 1351 default: 1352 panic("vm_fault: Unhandled rv %d", rv); 1353 } 1354 } 1355 1356 while (TRUE) { 1357 KASSERT(fs.m == NULL, 1358 ("page still set %p at loop start", fs.m)); 1359 /* 1360 * If the object is marked for imminent termination, 1361 * we retry here, since the collapse pass has raced 1362 * with us. Otherwise, if we see terminally dead 1363 * object, return fail. 1364 */ 1365 if ((fs.object->flags & OBJ_DEAD) != 0) { 1366 dead = fs.object->type == OBJT_DEAD; 1367 unlock_and_deallocate(&fs); 1368 if (dead) 1369 return (KERN_PROTECTION_FAILURE); 1370 pause("vmf_de", 1); 1371 goto RetryFault; 1372 } 1373 1374 /* 1375 * See if page is resident 1376 */ 1377 fs.m = vm_page_lookup(fs.object, fs.pindex); 1378 if (fs.m != NULL) { 1379 if (vm_page_tryxbusy(fs.m) == 0) { 1380 vm_fault_busy_sleep(&fs); 1381 goto RetryFault; 1382 } 1383 1384 /* 1385 * The page is marked busy for other processes and the 1386 * pagedaemon. If it still is completely valid we 1387 * are done. 1388 */ 1389 if (vm_page_all_valid(fs.m)) { 1390 VM_OBJECT_WUNLOCK(fs.object); 1391 break; /* break to PAGE HAS BEEN FOUND. */ 1392 } 1393 } 1394 VM_OBJECT_ASSERT_WLOCKED(fs.object); 1395 1396 /* 1397 * Page is not resident. If the pager might contain the page 1398 * or this is the beginning of the search, allocate a new 1399 * page. (Default objects are zero-fill, so there is no real 1400 * pager for them.) 1401 */ 1402 if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT || 1403 fs.object == fs.first_object)) { 1404 rv = vm_fault_allocate(&fs); 1405 switch (rv) { 1406 case KERN_RESTART: 1407 unlock_and_deallocate(&fs); 1408 /* FALLTHROUGH */ 1409 case KERN_RESOURCE_SHORTAGE: 1410 goto RetryFault; 1411 case KERN_SUCCESS: 1412 case KERN_FAILURE: 1413 case KERN_OUT_OF_BOUNDS: 1414 unlock_and_deallocate(&fs); 1415 return (rv); 1416 case KERN_NOT_RECEIVER: 1417 break; 1418 default: 1419 panic("vm_fault: Unhandled rv %d", rv); 1420 } 1421 } 1422 1423 /* 1424 * Default objects have no pager so no exclusive busy exists 1425 * to protect this page in the chain. Skip to the next 1426 * object without dropping the lock to preserve atomicity of 1427 * shadow faults. 1428 */ 1429 if (fs.object->type != OBJT_DEFAULT) { 1430 /* 1431 * At this point, we have either allocated a new page 1432 * or found an existing page that is only partially 1433 * valid. 1434 * 1435 * We hold a reference on the current object and the 1436 * page is exclusive busied. The exclusive busy 1437 * prevents simultaneous faults and collapses while 1438 * the object lock is dropped. 1439 */ 1440 VM_OBJECT_WUNLOCK(fs.object); 1441 1442 /* 1443 * If the pager for the current object might have 1444 * the page, then determine the number of additional 1445 * pages to read and potentially reprioritize 1446 * previously read pages for earlier reclamation. 1447 * These operations should only be performed once per 1448 * page fault. 
Even if the current pager doesn't 1449 * have the page, the number of additional pages to 1450 * read will apply to subsequent objects in the 1451 * shadow chain. 1452 */ 1453 if (nera == -1 && !P_KILLED(curproc)) 1454 nera = vm_fault_readahead(&fs); 1455 1456 rv = vm_fault_getpages(&fs, nera, &behind, &ahead); 1457 if (rv == KERN_SUCCESS) { 1458 faultcount = behind + 1 + ahead; 1459 hardfault = true; 1460 break; /* break to PAGE HAS BEEN FOUND. */ 1461 } 1462 if (rv == KERN_RESOURCE_SHORTAGE) 1463 goto RetryFault; 1464 VM_OBJECT_WLOCK(fs.object); 1465 if (rv == KERN_OUT_OF_BOUNDS) { 1466 fault_page_free(&fs.m); 1467 unlock_and_deallocate(&fs); 1468 return (rv); 1469 } 1470 } 1471 1472 /* 1473 * The page was not found in the current object. Try to 1474 * traverse into a backing object or zero fill if none is 1475 * found. 1476 */ 1477 if (vm_fault_next(&fs)) 1478 continue; 1479 if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) { 1480 if (fs.first_object == fs.object) 1481 fault_page_free(&fs.first_m); 1482 unlock_and_deallocate(&fs); 1483 return (KERN_OUT_OF_BOUNDS); 1484 } 1485 VM_OBJECT_WUNLOCK(fs.object); 1486 vm_fault_zerofill(&fs); 1487 /* Don't try to prefault neighboring pages. */ 1488 faultcount = 1; 1489 break; /* break to PAGE HAS BEEN FOUND. */ 1490 } 1491 1492 /* 1493 * PAGE HAS BEEN FOUND. A valid page has been found and exclusively 1494 * busied. The object lock must no longer be held. 1495 */ 1496 vm_page_assert_xbusied(fs.m); 1497 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1498 1499 /* 1500 * If the page is being written, but isn't already owned by the 1501 * top-level object, we have to copy it into a new page owned by the 1502 * top-level object. 1503 */ 1504 if (fs.object != fs.first_object) { 1505 /* 1506 * We only really need to copy if we want to write it. 1507 */ 1508 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1509 vm_fault_cow(&fs); 1510 /* 1511 * We only try to prefault read-only mappings to the 1512 * neighboring pages when this copy-on-write fault is 1513 * a hard fault. In other cases, trying to prefault 1514 * is typically wasted effort. 1515 */ 1516 if (faultcount == 0) 1517 faultcount = 1; 1518 1519 } else { 1520 fs.prot &= ~VM_PROT_WRITE; 1521 } 1522 } 1523 1524 /* 1525 * We must verify that the maps have not changed since our last 1526 * lookup. 1527 */ 1528 if (!fs.lookup_still_valid) { 1529 result = vm_fault_relookup(&fs); 1530 if (result != KERN_SUCCESS) { 1531 fault_deallocate(&fs); 1532 if (result == KERN_RESTART) 1533 goto RetryFault; 1534 return (result); 1535 } 1536 } 1537 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1538 1539 /* 1540 * If the page was filled by a pager, save the virtual address that 1541 * should be faulted on next under a sequential access pattern to the 1542 * map entry. A read lock on the map suffices to update this address 1543 * safely. 1544 */ 1545 if (hardfault) 1546 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1547 1548 /* 1549 * Page must be completely valid or it is not fit to 1550 * map into user space. vm_pager_get_pages() ensures this. 1551 */ 1552 vm_page_assert_xbusied(fs.m); 1553 KASSERT(vm_page_all_valid(fs.m), 1554 ("vm_fault: page %p partially invalid", fs.m)); 1555 1556 vm_fault_dirty(&fs, fs.m); 1557 1558 /* 1559 * Put this page into the physical map. We had to do the unlock above 1560 * because pmap_enter() may sleep. We don't put the page 1561 * back on the active queue until later so that the pageout daemon 1562 * won't find it (yet). 
1563 */ 1564 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, 1565 fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); 1566 if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && 1567 fs.wired == 0) 1568 vm_fault_prefault(&fs, vaddr, 1569 faultcount > 0 ? behind : PFBAK, 1570 faultcount > 0 ? ahead : PFFOR, false); 1571 1572 /* 1573 * If the page is not wired down, then put it where the pageout daemon 1574 * can find it. 1575 */ 1576 if ((fs.fault_flags & VM_FAULT_WIRE) != 0) 1577 vm_page_wire(fs.m); 1578 else 1579 vm_page_activate(fs.m); 1580 if (fs.m_hold != NULL) { 1581 (*fs.m_hold) = fs.m; 1582 vm_page_wire(fs.m); 1583 } 1584 vm_page_xunbusy(fs.m); 1585 fs.m = NULL; 1586 1587 /* 1588 * Unlock everything, and return 1589 */ 1590 fault_deallocate(&fs); 1591 if (hardfault) { 1592 VM_CNT_INC(v_io_faults); 1593 curthread->td_ru.ru_majflt++; 1594 #ifdef RACCT 1595 if (racct_enable && fs.object->type == OBJT_VNODE) { 1596 PROC_LOCK(curproc); 1597 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1598 racct_add_force(curproc, RACCT_WRITEBPS, 1599 PAGE_SIZE + behind * PAGE_SIZE); 1600 racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1601 } else { 1602 racct_add_force(curproc, RACCT_READBPS, 1603 PAGE_SIZE + ahead * PAGE_SIZE); 1604 racct_add_force(curproc, RACCT_READIOPS, 1); 1605 } 1606 PROC_UNLOCK(curproc); 1607 } 1608 #endif 1609 } else 1610 curthread->td_ru.ru_minflt++; 1611 1612 return (KERN_SUCCESS); 1613 } 1614 1615 /* 1616 * Speed up the reclamation of pages that precede the faulting pindex within 1617 * the first object of the shadow chain. Essentially, perform the equivalent 1618 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1619 * the faulting pindex by the cluster size when the pages read by vm_fault() 1620 * cross a cluster-size boundary. The cluster size is the greater of the 1621 * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1622 * 1623 * When "fs->first_object" is a shadow object, the pages in the backing object 1624 * that precede the faulting pindex are deactivated by vm_fault(). So, this 1625 * function must only be concerned with pages in the first object. 1626 */ 1627 static void 1628 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 1629 { 1630 vm_map_entry_t entry; 1631 vm_object_t first_object, object; 1632 vm_offset_t end, start; 1633 vm_page_t m, m_next; 1634 vm_pindex_t pend, pstart; 1635 vm_size_t size; 1636 1637 object = fs->object; 1638 VM_OBJECT_ASSERT_UNLOCKED(object); 1639 first_object = fs->first_object; 1640 /* Neither fictitious nor unmanaged pages can be reclaimed. 
	 */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
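/*
 * Illustrative usage sketch (not taken from in-tree code; the buffer name,
 * length, and array size are hypothetical): a caller that needs a user
 * buffer resident for the duration of an operation might do
 *
 *	vm_page_t ma[16];
 *	int n;
 *
 *	n = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uaddr,
 *	    len, VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
 *	if (n == -1)
 *		return (EFAULT);
 *	... access the pages ...
 *	vm_page_unhold_pages(ma, n);
 */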
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().
Some (out of tree) callers mark 1829 * too wide a code area with vm_fault_disable_pagefaults() 1830 * already, use the VM_PROT_QUICK_NOFAULT flag to request 1831 * the proper behaviour explicitly. 1832 */ 1833 if ((prot & VM_PROT_QUICK_NOFAULT) != 0 && 1834 (curthread->td_pflags & TDP_NOFAULTING) != 0) 1835 goto error; 1836 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) 1837 if (*mp == NULL && vm_fault(map, va, prot, 1838 VM_FAULT_NORMAL, mp) != KERN_SUCCESS) 1839 goto error; 1840 } 1841 return (count); 1842 error: 1843 for (mp = ma; mp < ma + count; mp++) 1844 if (*mp != NULL) 1845 vm_page_unwire(*mp, PQ_INACTIVE); 1846 return (-1); 1847 } 1848 1849 /* 1850 * Routine: 1851 * vm_fault_copy_entry 1852 * Function: 1853 * Create new shadow object backing dst_entry with private copy of 1854 * all underlying pages. When src_entry is equal to dst_entry, 1855 * function implements COW for wired-down map entry. Otherwise, 1856 * it forks wired entry into dst_map. 1857 * 1858 * In/out conditions: 1859 * The source and destination maps must be locked for write. 1860 * The source map entry must be wired down (or be a sharing map 1861 * entry corresponding to a main map entry that is wired down). 1862 */ 1863 void 1864 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 1865 vm_map_entry_t dst_entry, vm_map_entry_t src_entry, 1866 vm_ooffset_t *fork_charge) 1867 { 1868 vm_object_t backing_object, dst_object, object, src_object; 1869 vm_pindex_t dst_pindex, pindex, src_pindex; 1870 vm_prot_t access, prot; 1871 vm_offset_t vaddr; 1872 vm_page_t dst_m; 1873 vm_page_t src_m; 1874 boolean_t upgrade; 1875 1876 #ifdef lint 1877 src_map++; 1878 #endif /* lint */ 1879 1880 upgrade = src_entry == dst_entry; 1881 access = prot = dst_entry->protection; 1882 1883 src_object = src_entry->object.vm_object; 1884 src_pindex = OFF_TO_IDX(src_entry->offset); 1885 1886 if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 1887 dst_object = src_object; 1888 vm_object_reference(dst_object); 1889 } else { 1890 /* 1891 * Create the top-level object for the destination entry. 1892 * Doesn't actually shadow anything - we copy the pages 1893 * directly. 1894 */ 1895 dst_object = vm_object_allocate_anon(atop(dst_entry->end - 1896 dst_entry->start), NULL, NULL, 0); 1897 #if VM_NRESERVLEVEL > 0 1898 dst_object->flags |= OBJ_COLORED; 1899 dst_object->pg_color = atop(dst_entry->start); 1900 #endif 1901 dst_object->domain = src_object->domain; 1902 dst_object->charge = dst_entry->end - dst_entry->start; 1903 } 1904 1905 VM_OBJECT_WLOCK(dst_object); 1906 KASSERT(upgrade || dst_entry->object.vm_object == NULL, 1907 ("vm_fault_copy_entry: vm_object not NULL")); 1908 if (src_object != dst_object) { 1909 dst_entry->object.vm_object = dst_object; 1910 dst_entry->offset = 0; 1911 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC; 1912 } 1913 if (fork_charge != NULL) { 1914 KASSERT(dst_entry->cred == NULL, 1915 ("vm_fault_copy_entry: leaked swp charge")); 1916 dst_object->cred = curthread->td_ucred; 1917 crhold(dst_object->cred); 1918 *fork_charge += dst_object->charge; 1919 } else if ((dst_object->type == OBJT_DEFAULT || 1920 dst_object->type == OBJT_SWAP) && 1921 dst_object->cred == NULL) { 1922 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 1923 dst_entry)); 1924 dst_object->cred = dst_entry->cred; 1925 dst_entry->cred = NULL; 1926 } 1927 1928 /* 1929 * If not an upgrade, then enter the mappings in the pmap as 1930 * read and/or execute accesses. Otherwise, enter them as 1931 * write accesses. 
/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create a new shadow object backing dst_entry with a private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry, the
 *	function implements COW for the wired-down map entry.  Otherwise,
 *	it forks the wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can be
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
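/*
 * Illustrative note (not from the original source): the "upgrade" case
 * above corresponds to a caller passing the same entry as both source and
 * destination in order to replace a wired copy-on-write mapping with
 * private pages, roughly:
 *
 *	vm_fault_copy_entry(map, map, entry, entry, NULL);
 *
 * whereas forking a wired entry passes distinct maps and entries along
 * with a fork_charge accumulator.  The exact call sites are assumptions
 * here, not documented by this file.
 */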
/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
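/*
 * Illustrative sketch (not part of the original file): the expected usage
 * pattern for the pair above is to bracket a region that must not enter
 * vm_fault(), saving and restoring the previous thread state.  The helper
 * name and the copyin() target below are hypothetical.
 */
static int __unused
vm_fault_nofaulting_example(const void *uaddr)
{
	char buf[64];
	int error, save;

	save = vm_fault_disable_pagefaults();
	/*
	 * With TDP_NOFAULTING set, a page fault taken here fails fast
	 * instead of sleeping, so copyin() returns EFAULT rather than
	 * blocking on pager I/O.
	 */
	error = copyin(uaddr, buf, sizeof(buf));
	vm_fault_enable_pagefaults(save);
	return (error);
}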