/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}
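/*
 * Release an exclusively busied page and free it if possible.  A wired
 * page must not be freed, since that would leak the wiring, so it is
 * merely unbusied instead.
 */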
static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}
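/*
 * Release all resources held in the faultstate: the COW page reference,
 * the busied pages, the paging-in-progress counts, the reference on the
 * top-level object, the map lookup, and the vnode lock, in that order.
 */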
static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}
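/*
 * Mark the object as writeable-dirty and, when required, dirty the
 * faulted page itself.  The page is dirtied for write faults that do
 * not merely wire the mapping, and whenever VM_FAULT_DIRTY is requested.
 * The PGA_NOSYNC flag is kept consistent with the map entry's
 * MAP_ENTRY_NOSYNC setting.
 */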
static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}
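/*
 * Reacquire the map lock that was dropped before calling the pager.  If
 * the trylock fails, the top-level object lock is dropped and retaken
 * around the blocking map lock acquisition, so the object's state may
 * have changed; callers check the map timestamp afterwards.
 */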
static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}
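/*
 * Undo a pager populate operation: deactivate and unbusy every page in
 * the given range of the object.  Used when the populated range cannot
 * be mapped, e.g., because the map has changed or the pager returned
 * more pages than fit in the map entry.
 */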
static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}
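/*
 * Handle the fault by asking the pager to populate a range of pages
 * around the faulting index, then mapping as many of the returned pages
 * as possible.  Only called when the fault is against the top-level
 * object and the pager implements the populate() method (OBJ_POPULATE).
 */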
static int
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESTART);
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (KERN_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in superpage run.
	 */
	if (bdry_idx != 0) {
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}
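/*
 * Lock the vnode backing the current object, if any.  The lock is first
 * attempted without sleeping; if that fails, all fault state is
 * released before sleeping for the lock, and KERN_RESOURCE_SHORTAGE is
 * returned so that the caller restarts the fault.
 */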
static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is
		 * "# of sequential faults x (read ahead min + 1)
		 * + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}
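/*
 * Look up the faulting address in the map, returning the map entry and
 * the top-level object and pindex backing it.  On success the map is
 * read-locked and remains so, as recorded by lookup_still_valid.
 */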
static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}
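/*
 * Reacquire the map lock and verify that the original lookup is still
 * valid.  Returns KERN_RESTART if the map has changed in a way that
 * requires the fault to start over.
 */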
static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
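/*
 * Resolve a copy-on-write fault: transfer the page found in a backing
 * object to the top-level object, either by moving the page directly
 * when the backing object has a single reference and shadow, or by
 * copying its contents into a newly allocated page.
 */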
static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up.
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;
	}
	/*
	 * fs->object != fs->first_object due to above
	 * conditional.
	 */
	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}
/*
 * Allocate a page directly or via the object populate method.
 */
static int
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	int alloc_req;
	int rv;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		rv = vm_fault_lock_vnode(fs, true);
		MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
		if (rv == KERN_RESOURCE_SHORTAGE)
			return (rv);
	}

	if (fs->pindex >= fs->object->size)
		return (KERN_OUT_OF_BOUNDS);

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		rv = vm_fault_populate(fs);
		switch (rv) {
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_RESTART:
			return (rv);
		case KERN_NOT_RECEIVER:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * Unlocked read of the p_flag is harmless.  At worst, the P_KILLED
	 * might not be observed there, and allocation can fail, causing
	 * restart and new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		alloc_req = P_KILLED(curproc) ?
		    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
		if (fs->object->type != OBJT_VNODE &&
		    fs->object->backing_object == NULL)
			alloc_req |= VM_ALLOC_ZERO;
		fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req);
	}
	if (fs->m == NULL) {
		unlock_and_deallocate(fs);
		if (vm_pfault_oom_attempts < 0 ||
		    fs->oom < vm_pfault_oom_attempts) {
			fs->oom++;
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		} else {
			if (bootverbose)
				printf(
"proc %d (%s) failed to alloc page on fault, starting OOM\n",
				    curproc->p_pid, curproc->p_comm);
			vm_pageout_oom(VM_OOM_MEM_PF);
			fs->oom = 0;
		}
		return (KERN_RESOURCE_SHORTAGE);
	}
	fs->oom = 0;

	return (KERN_NOT_RECEIVER);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static int
vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	rv = vm_fault_lock_vnode(fs, false);
	MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
	if (rv == KERN_RESOURCE_SHORTAGE)
		return (rv);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint to the pager
	 * that it may bring up surrounding pages.
	 */
	if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (nera > 0) {
			behind = 0;
			ahead = nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (KERN_SUCCESS);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
		return (KERN_OUT_OF_BOUNDS);
	return (KERN_NOT_RECEIVER);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m == vm_page_lookup(fs->object, fs->pindex))
		vm_page_busy_sleep(fs->m, "vmpfw", false);
	else
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}
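/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address, requiring the
 *	given permissions, in the map specified.  If successful, the page
 *	is inserted into the associated physical map and, when "m_hold" is
 *	not NULL, the page is additionally wired and returned in "*m_hold".
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */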
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount;
	int nera, result, rv;
	bool dead, hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom = 0;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	result = vm_fault_lookup(&fs);
	if (result != KERN_SUCCESS) {
		if (result == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (result);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		rv = vm_fault_soft_fast(&fs);
		if (rv == KERN_SUCCESS)
			return (rv);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		rv = vm_fault_allocate(&fs);
		switch (rv) {
		case KERN_RESTART:
			unlock_and_deallocate(&fs);
			/* FALLTHROUGH */
		case KERN_RESOURCE_SHORTAGE:
			goto RetryFault;
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_OUT_OF_BOUNDS:
			unlock_and_deallocate(&fs);
			return (rv);
		case KERN_NOT_RECEIVER:
			break;
		default:
			panic("vm_fault: Unhandled rv %d", rv);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			if (vm_page_tryxbusy(fs.m) == 0) {
				vm_fault_busy_sleep(&fs);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it is still completely valid we
			 * are done.
			 */
			if (vm_page_all_valid(fs.m)) {
				VM_OBJECT_WUNLOCK(fs.object);
				break; /* break to PAGE HAS BEEN FOUND. */
			}
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.object);

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object)) {
			rv = vm_fault_allocate(&fs);
			switch (rv) {
			case KERN_RESTART:
				unlock_and_deallocate(&fs);
				/* FALLTHROUGH */
			case KERN_RESOURCE_SHORTAGE:
				goto RetryFault;
			case KERN_SUCCESS:
			case KERN_FAILURE:
			case KERN_OUT_OF_BOUNDS:
				unlock_and_deallocate(&fs);
				return (rv);
			case KERN_NOT_RECEIVER:
				break;
			default:
				panic("vm_fault: Unhandled rv %d", rv);
			}
		}

		/*
		 * Default objects have no pager so no exclusive busy exists
		 * to protect this page in the chain.  Skip to the next
		 * object without dropping the lock to preserve atomicity of
		 * shadow faults.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * At this point, we have either allocated a new page
			 * or found an existing page that is only partially
			 * valid.
			 *
			 * We hold a reference on the current object and the
			 * page is exclusive busied.  The exclusive busy
			 * prevents simultaneous faults and collapses while
			 * the object lock is dropped.
			 */
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * If the pager for the current object might have
			 * the page, then determine the number of additional
			 * pages to read and potentially reprioritize
			 * previously read pages for earlier reclamation.
			 * These operations should only be performed once per
			 * page fault.  Even if the current pager doesn't
			 * have the page, the number of additional pages to
			 * read will apply to subsequent objects in the
			 * shadow chain.
			 */
			if (nera == -1 && !P_KILLED(curproc))
				nera = vm_fault_readahead(&fs);

			rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
			if (rv == KERN_SUCCESS) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND. */
			}
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			VM_OBJECT_WLOCK(fs.object);
			if (rv == KERN_OUT_OF_BOUNDS) {
				fault_page_free(&fs.m);
				unlock_and_deallocate(&fs);
				return (rv);
			}
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break; /* break to PAGE HAS BEEN FOUND. */
	}
	/*
	 * PAGE HAS BEEN FOUND.  A valid page has been found and exclusively
	 * busied.  The object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;
		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		result = vm_fault_relookup(&fs);
		if (result != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (result == KERN_RESTART)
				goto RetryFault;
			return (result);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);
	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return.
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}
/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_UNLOCKED(object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}
/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers already mark
		 * too wide a code area with vm_fault_disable_pagefaults();
		 * use the VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}
/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new shadow object backing dst_entry with private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry,
 *	function implements COW for wired-down map entry.  Otherwise,
 *	it forks wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can be
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
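
/*
 * Illustrative sketch (not part of the kernel): the two call patterns for
 * vm_fault_copy_entry() implied by the header comment above.  The map and
 * entry variables are hypothetical stand-ins for the caller's state.
 */
#if 0	/* example only */
	/*
	 * Fork-style copy of a wired entry into another map: distinct
	 * source and destination entries, with the swap charge
	 * accumulated in "fork_charge".
	 */
	vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
	    &fork_charge);

	/*
	 * In-place copy-on-write upgrade of a wired entry: the same
	 * entry is passed as both source and destination, and no
	 * fork charge is taken.
	 */
	vm_fault_copy_entry(map, map, entry, entry, NULL);
#endif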
/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
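
/*
 * Illustrative sketch (not part of the kernel): the save/restore pattern
 * expected by the two routines above.  While TDP_NOFAULTING is set, a
 * copyin() that would otherwise page in data fails with EFAULT instead of
 * sleeping in the fault handler.  "uaddr", "buf", and "len" are
 * hypothetical.
 */
#if 0	/* example only */
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(uaddr, buf, len);	/* fails rather than faults */
	vm_fault_enable_pagefaults(save);
	if (error != 0)
		return (error);
#endif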