1 /*- 2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * Copyright (c) 1994 John S. Dyson 7 * All rights reserved. 8 * Copyright (c) 1994 David Greenman 9 * All rights reserved. 10 * 11 * 12 * This code is derived from software contributed to Berkeley by 13 * The Mach Operating System project at Carnegie-Mellon University. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94 44 * 45 * 46 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 47 * All rights reserved. 48 * 49 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 50 * 51 * Permission to use, copy, modify and distribute this software and 52 * its documentation is hereby granted, provided that both the copyright 53 * notice and this permission notice appear in all copies of the 54 * software, derivative works or modified versions, and any portions 55 * thereof, and that both notices appear in supporting documentation. 56 * 57 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 58 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 59 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 60 * 61 * Carnegie Mellon requests users of this software to return to 62 * 63 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 64 * School of Computer Science 65 * Carnegie Mellon University 66 * Pittsburgh PA 15213-3890 67 * 68 * any improvements or extensions that they make and grant Carnegie the 69 * rights to redistribute these changes. 70 */ 71 72 /* 73 * Page fault handling module. 74 */ 75 76 #include <sys/cdefs.h> 77 __FBSDID("$FreeBSD$"); 78 79 #include "opt_ktrace.h" 80 #include "opt_vm.h" 81 82 #include <sys/param.h> 83 #include <sys/systm.h> 84 #include <sys/kernel.h> 85 #include <sys/lock.h> 86 #include <sys/mman.h> 87 #include <sys/mutex.h> 88 #include <sys/proc.h> 89 #include <sys/racct.h> 90 #include <sys/refcount.h> 91 #include <sys/resourcevar.h> 92 #include <sys/rwlock.h> 93 #include <sys/signalvar.h> 94 #include <sys/sysctl.h> 95 #include <sys/sysent.h> 96 #include <sys/vmmeter.h> 97 #include <sys/vnode.h> 98 #ifdef KTRACE 99 #include <sys/ktrace.h> 100 #endif 101 102 #include <vm/vm.h> 103 #include <vm/vm_param.h> 104 #include <vm/pmap.h> 105 #include <vm/vm_map.h> 106 #include <vm/vm_object.h> 107 #include <vm/vm_page.h> 108 #include <vm/vm_pageout.h> 109 #include <vm/vm_kern.h> 110 #include <vm/vm_pager.h> 111 #include <vm/vm_extern.h> 112 #include <vm/vm_reserv.h> 113 114 #define PFBAK 4 115 #define PFFOR 4 116 117 #define VM_FAULT_READ_DEFAULT (1 + VM_FAULT_READ_AHEAD_INIT) 118 #define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX) 119 120 #define VM_FAULT_DONTNEED_MIN 1048576 121 122 struct faultstate { 123 /* Fault parameters. */ 124 vm_offset_t vaddr; 125 vm_page_t *m_hold; 126 vm_prot_t fault_type; 127 vm_prot_t prot; 128 int fault_flags; 129 int oom; 130 boolean_t wired; 131 132 /* Page reference for cow. */ 133 vm_page_t m_cow; 134 135 /* Current object. */ 136 vm_object_t object; 137 vm_pindex_t pindex; 138 vm_page_t m; 139 140 /* Top-level map object. */ 141 vm_object_t first_object; 142 vm_pindex_t first_pindex; 143 vm_page_t first_m; 144 145 /* Map state. */ 146 vm_map_t map; 147 vm_map_entry_t entry; 148 int map_generation; 149 bool lookup_still_valid; 150 151 /* Vnode if locked. */ 152 struct vnode *vp; 153 }; 154 155 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, 156 int ahead); 157 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 158 int backward, int forward, bool obj_locked); 159 160 static int vm_pfault_oom_attempts = 3; 161 SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN, 162 &vm_pfault_oom_attempts, 0, 163 "Number of page allocation attempts in page fault handler before it " 164 "triggers OOM handling"); 165 166 static int vm_pfault_oom_wait = 10; 167 SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN, 168 &vm_pfault_oom_wait, 0, 169 "Number of seconds to wait for free pages before retrying " 170 "the page fault handler"); 171 172 static inline void 173 fault_page_release(vm_page_t *mp) 174 { 175 vm_page_t m; 176 177 m = *mp; 178 if (m != NULL) { 179 /* 180 * We are likely to loop around again and attempt to busy 181 * this page. Deactivating it leaves it available for 182 * pageout while optimizing fault restarts. 183 */ 184 vm_page_deactivate(m); 185 vm_page_xunbusy(m); 186 *mp = NULL; 187 } 188 } 189 190 static inline void 191 fault_page_free(vm_page_t *mp) 192 { 193 vm_page_t m; 194 195 m = *mp; 196 if (m != NULL) { 197 VM_OBJECT_ASSERT_WLOCKED(m->object); 198 if (!vm_page_wired(m)) 199 vm_page_free(m); 200 else 201 vm_page_xunbusy(m); 202 *mp = NULL; 203 } 204 } 205 206 static inline void 207 unlock_map(struct faultstate *fs) 208 { 209 210 if (fs->lookup_still_valid) { 211 vm_map_lookup_done(fs->map, fs->entry); 212 fs->lookup_still_valid = false; 213 } 214 } 215 216 static void 217 unlock_vp(struct faultstate *fs) 218 { 219 220 if (fs->vp != NULL) { 221 vput(fs->vp); 222 fs->vp = NULL; 223 } 224 } 225 226 static void 227 fault_deallocate(struct faultstate *fs) 228 { 229 230 fault_page_release(&fs->m_cow); 231 fault_page_release(&fs->m); 232 vm_object_pip_wakeup(fs->object); 233 if (fs->object != fs->first_object) { 234 VM_OBJECT_WLOCK(fs->first_object); 235 fault_page_free(&fs->first_m); 236 VM_OBJECT_WUNLOCK(fs->first_object); 237 vm_object_pip_wakeup(fs->first_object); 238 } 239 vm_object_deallocate(fs->first_object); 240 unlock_map(fs); 241 unlock_vp(fs); 242 } 243 244 static void 245 unlock_and_deallocate(struct faultstate *fs) 246 { 247 248 VM_OBJECT_WUNLOCK(fs->object); 249 fault_deallocate(fs); 250 } 251 252 static void 253 vm_fault_dirty(struct faultstate *fs, vm_page_t m) 254 { 255 bool need_dirty; 256 257 if (((fs->prot & VM_PROT_WRITE) == 0 && 258 (fs->fault_flags & VM_FAULT_DIRTY) == 0) || 259 (m->oflags & VPO_UNMANAGED) != 0) 260 return; 261 262 VM_PAGE_OBJECT_BUSY_ASSERT(m); 263 264 need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 && 265 (fs->fault_flags & VM_FAULT_WIRE) == 0) || 266 (fs->fault_flags & VM_FAULT_DIRTY) != 0; 267 268 vm_object_set_writeable_dirty(m->object); 269 270 /* 271 * If the fault is a write, we know that this page is being 272 * written NOW so dirty it explicitly to save on 273 * pmap_is_modified() calls later. 274 * 275 * Also, since the page is now dirty, we can possibly tell 276 * the pager to release any swap backing the page. 277 */ 278 if (need_dirty && vm_page_set_dirty(m) == 0) { 279 /* 280 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC 281 * if the page is already dirty to prevent data written with 282 * the expectation of being synced from not being synced. 283 * Likewise if this entry does not request NOSYNC then make 284 * sure the page isn't marked NOSYNC. Applications sharing 285 * data should use the same flags to avoid ping ponging. 286 */ 287 if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0) 288 vm_page_aflag_set(m, PGA_NOSYNC); 289 else 290 vm_page_aflag_clear(m, PGA_NOSYNC); 291 } 292 293 } 294 295 /* 296 * Unlocks fs.first_object and fs.map on success. 297 */ 298 static int 299 vm_fault_soft_fast(struct faultstate *fs) 300 { 301 vm_page_t m, m_map; 302 #if VM_NRESERVLEVEL > 0 303 vm_page_t m_super; 304 int flags; 305 #endif 306 int psind, rv; 307 vm_offset_t vaddr; 308 309 MPASS(fs->vp == NULL); 310 vaddr = fs->vaddr; 311 vm_object_busy(fs->first_object); 312 m = vm_page_lookup(fs->first_object, fs->first_pindex); 313 /* A busy page can be mapped for read|execute access. */ 314 if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 && 315 vm_page_busied(m)) || !vm_page_all_valid(m)) { 316 rv = KERN_FAILURE; 317 goto out; 318 } 319 m_map = m; 320 psind = 0; 321 #if VM_NRESERVLEVEL > 0 322 if ((m->flags & PG_FICTITIOUS) == 0 && 323 (m_super = vm_reserv_to_superpage(m)) != NULL && 324 rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start && 325 roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end && 326 (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) & 327 (pagesizes[m_super->psind] - 1)) && !fs->wired && 328 pmap_ps_enabled(fs->map->pmap)) { 329 flags = PS_ALL_VALID; 330 if ((fs->prot & VM_PROT_WRITE) != 0) { 331 /* 332 * Create a superpage mapping allowing write access 333 * only if none of the constituent pages are busy and 334 * all of them are already dirty (except possibly for 335 * the page that was faulted on). 336 */ 337 flags |= PS_NONE_BUSY; 338 if ((fs->first_object->flags & OBJ_UNMANAGED) == 0) 339 flags |= PS_ALL_DIRTY; 340 } 341 if (vm_page_ps_test(m_super, flags, m)) { 342 m_map = m_super; 343 psind = m_super->psind; 344 vaddr = rounddown2(vaddr, pagesizes[psind]); 345 /* Preset the modified bit for dirty superpages. */ 346 if ((flags & PS_ALL_DIRTY) != 0) 347 fs->fault_type |= VM_PROT_WRITE; 348 } 349 } 350 #endif 351 rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type | 352 PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 353 if (rv != KERN_SUCCESS) 354 goto out; 355 if (fs->m_hold != NULL) { 356 (*fs->m_hold) = m; 357 vm_page_wire(m); 358 } 359 if (psind == 0 && !fs->wired) 360 vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true); 361 VM_OBJECT_RUNLOCK(fs->first_object); 362 vm_fault_dirty(fs, m); 363 vm_map_lookup_done(fs->map, fs->entry); 364 curthread->td_ru.ru_minflt++; 365 366 out: 367 vm_object_unbusy(fs->first_object); 368 return (rv); 369 } 370 371 static void 372 vm_fault_restore_map_lock(struct faultstate *fs) 373 { 374 375 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 376 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 377 378 if (!vm_map_trylock_read(fs->map)) { 379 VM_OBJECT_WUNLOCK(fs->first_object); 380 vm_map_lock_read(fs->map); 381 VM_OBJECT_WLOCK(fs->first_object); 382 } 383 fs->lookup_still_valid = true; 384 } 385 386 static void 387 vm_fault_populate_check_page(vm_page_t m) 388 { 389 390 /* 391 * Check each page to ensure that the pager is obeying the 392 * interface: the page must be installed in the object, fully 393 * valid, and exclusively busied. 394 */ 395 MPASS(m != NULL); 396 MPASS(vm_page_all_valid(m)); 397 MPASS(vm_page_xbusied(m)); 398 } 399 400 static void 401 vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first, 402 vm_pindex_t last) 403 { 404 vm_page_t m; 405 vm_pindex_t pidx; 406 407 VM_OBJECT_ASSERT_WLOCKED(object); 408 MPASS(first <= last); 409 for (pidx = first, m = vm_page_lookup(object, pidx); 410 pidx <= last; pidx++, m = vm_page_next(m)) { 411 vm_fault_populate_check_page(m); 412 vm_page_deactivate(m); 413 vm_page_xunbusy(m); 414 } 415 } 416 417 static int 418 vm_fault_populate(struct faultstate *fs) 419 { 420 vm_offset_t vaddr; 421 vm_page_t m; 422 vm_pindex_t map_first, map_last, pager_first, pager_last, pidx; 423 int bdry_idx, i, npages, psind, rv; 424 425 MPASS(fs->object == fs->first_object); 426 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 427 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 428 MPASS(fs->first_object->backing_object == NULL); 429 MPASS(fs->lookup_still_valid); 430 431 pager_first = OFF_TO_IDX(fs->entry->offset); 432 pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1; 433 unlock_map(fs); 434 unlock_vp(fs); 435 436 /* 437 * Call the pager (driver) populate() method. 438 * 439 * There is no guarantee that the method will be called again 440 * if the current fault is for read, and a future fault is 441 * for write. Report the entry's maximum allowed protection 442 * to the driver. 443 */ 444 rv = vm_pager_populate(fs->first_object, fs->first_pindex, 445 fs->fault_type, fs->entry->max_protection, &pager_first, 446 &pager_last); 447 448 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 449 if (rv == VM_PAGER_BAD) { 450 /* 451 * VM_PAGER_BAD is the backdoor for a pager to request 452 * normal fault handling. 453 */ 454 vm_fault_restore_map_lock(fs); 455 if (fs->map->timestamp != fs->map_generation) 456 return (KERN_RESTART); 457 return (KERN_NOT_RECEIVER); 458 } 459 if (rv != VM_PAGER_OK) 460 return (KERN_FAILURE); /* AKA SIGSEGV */ 461 462 /* Ensure that the driver is obeying the interface. */ 463 MPASS(pager_first <= pager_last); 464 MPASS(fs->first_pindex <= pager_last); 465 MPASS(fs->first_pindex >= pager_first); 466 MPASS(pager_last < fs->first_object->size); 467 468 vm_fault_restore_map_lock(fs); 469 bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> 470 MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 471 if (fs->map->timestamp != fs->map_generation) { 472 if (bdry_idx == 0) { 473 vm_fault_populate_cleanup(fs->first_object, pager_first, 474 pager_last); 475 } else { 476 m = vm_page_lookup(fs->first_object, pager_first); 477 if (m != fs->m) 478 vm_page_xunbusy(m); 479 } 480 return (KERN_RESTART); 481 } 482 483 /* 484 * The map is unchanged after our last unlock. Process the fault. 485 * 486 * First, the special case of largepage mappings, where 487 * populate only busies the first page in superpage run. 488 */ 489 if (bdry_idx != 0) { 490 KASSERT(PMAP_HAS_LARGEPAGES, 491 ("missing pmap support for large pages")); 492 m = vm_page_lookup(fs->first_object, pager_first); 493 vm_fault_populate_check_page(m); 494 VM_OBJECT_WUNLOCK(fs->first_object); 495 vaddr = fs->entry->start + IDX_TO_OFF(pager_first) - 496 fs->entry->offset; 497 /* assert alignment for entry */ 498 KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0, 499 ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx", 500 (uintmax_t)fs->entry->start, (uintmax_t)pager_first, 501 (uintmax_t)fs->entry->offset, (uintmax_t)vaddr)); 502 KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0, 503 ("unaligned superpage m %p %#jx", m, 504 (uintmax_t)VM_PAGE_TO_PHYS(m))); 505 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, 506 fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) | 507 PMAP_ENTER_LARGEPAGE, bdry_idx); 508 VM_OBJECT_WLOCK(fs->first_object); 509 vm_page_xunbusy(m); 510 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) { 511 for (i = 0; i < atop(pagesizes[bdry_idx]); i++) 512 vm_page_wire(m + i); 513 } 514 if (fs->m_hold != NULL) { 515 *fs->m_hold = m + (fs->first_pindex - pager_first); 516 vm_page_wire(*fs->m_hold); 517 } 518 goto out; 519 } 520 521 /* 522 * The range [pager_first, pager_last] that is given to the 523 * pager is only a hint. The pager may populate any range 524 * within the object that includes the requested page index. 525 * In case the pager expanded the range, clip it to fit into 526 * the map entry. 527 */ 528 map_first = OFF_TO_IDX(fs->entry->offset); 529 if (map_first > pager_first) { 530 vm_fault_populate_cleanup(fs->first_object, pager_first, 531 map_first - 1); 532 pager_first = map_first; 533 } 534 map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1; 535 if (map_last < pager_last) { 536 vm_fault_populate_cleanup(fs->first_object, map_last + 1, 537 pager_last); 538 pager_last = map_last; 539 } 540 for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx); 541 pidx <= pager_last; 542 pidx += npages, m = vm_page_next(&m[npages - 1])) { 543 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset; 544 #if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \ 545 __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) 546 psind = m->psind; 547 if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 || 548 pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last || 549 !pmap_ps_enabled(fs->map->pmap) || fs->wired)) 550 psind = 0; 551 #else 552 psind = 0; 553 #endif 554 npages = atop(pagesizes[psind]); 555 for (i = 0; i < npages; i++) { 556 vm_fault_populate_check_page(&m[i]); 557 vm_fault_dirty(fs, &m[i]); 558 } 559 VM_OBJECT_WUNLOCK(fs->first_object); 560 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | 561 (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 562 #if defined(__amd64__) 563 if (psind > 0 && rv == KERN_FAILURE) { 564 for (i = 0; i < npages; i++) { 565 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), 566 &m[i], fs->prot, fs->fault_type | 567 (fs->wired ? PMAP_ENTER_WIRED : 0), 0); 568 MPASS(rv == KERN_SUCCESS); 569 } 570 } 571 #else 572 MPASS(rv == KERN_SUCCESS); 573 #endif 574 VM_OBJECT_WLOCK(fs->first_object); 575 for (i = 0; i < npages; i++) { 576 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) 577 vm_page_wire(&m[i]); 578 else 579 vm_page_activate(&m[i]); 580 if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) { 581 (*fs->m_hold) = &m[i]; 582 vm_page_wire(&m[i]); 583 } 584 vm_page_xunbusy(&m[i]); 585 } 586 } 587 out: 588 curthread->td_ru.ru_majflt++; 589 return (KERN_SUCCESS); 590 } 591 592 static int prot_fault_translation; 593 SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN, 594 &prot_fault_translation, 0, 595 "Control signal to deliver on protection fault"); 596 597 /* compat definition to keep common code for signal translation */ 598 #define UCODE_PAGEFLT 12 599 #ifdef T_PAGEFLT 600 _Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT"); 601 #endif 602 603 /* 604 * vm_fault_trap: 605 * 606 * Handle a page fault occurring at the given address, 607 * requiring the given permissions, in the map specified. 608 * If successful, the page is inserted into the 609 * associated physical map. 610 * 611 * NOTE: the given address should be truncated to the 612 * proper page address. 613 * 614 * KERN_SUCCESS is returned if the page fault is handled; otherwise, 615 * a standard error specifying why the fault is fatal is returned. 616 * 617 * The map in question must be referenced, and remains so. 618 * Caller may hold no locks. 619 */ 620 int 621 vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 622 int fault_flags, int *signo, int *ucode) 623 { 624 int result; 625 626 MPASS(signo == NULL || ucode != NULL); 627 #ifdef KTRACE 628 if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT)) 629 ktrfault(vaddr, fault_type); 630 #endif 631 result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags, 632 NULL); 633 KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE || 634 result == KERN_INVALID_ADDRESS || 635 result == KERN_RESOURCE_SHORTAGE || 636 result == KERN_PROTECTION_FAILURE || 637 result == KERN_OUT_OF_BOUNDS, 638 ("Unexpected Mach error %d from vm_fault()", result)); 639 #ifdef KTRACE 640 if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND)) 641 ktrfaultend(result); 642 #endif 643 if (result != KERN_SUCCESS && signo != NULL) { 644 switch (result) { 645 case KERN_FAILURE: 646 case KERN_INVALID_ADDRESS: 647 *signo = SIGSEGV; 648 *ucode = SEGV_MAPERR; 649 break; 650 case KERN_RESOURCE_SHORTAGE: 651 *signo = SIGBUS; 652 *ucode = BUS_OOMERR; 653 break; 654 case KERN_OUT_OF_BOUNDS: 655 *signo = SIGBUS; 656 *ucode = BUS_OBJERR; 657 break; 658 case KERN_PROTECTION_FAILURE: 659 if (prot_fault_translation == 0) { 660 /* 661 * Autodetect. This check also covers 662 * the images without the ABI-tag ELF 663 * note. 664 */ 665 if (SV_CURPROC_ABI() == SV_ABI_FREEBSD && 666 curproc->p_osrel >= P_OSREL_SIGSEGV) { 667 *signo = SIGSEGV; 668 *ucode = SEGV_ACCERR; 669 } else { 670 *signo = SIGBUS; 671 *ucode = UCODE_PAGEFLT; 672 } 673 } else if (prot_fault_translation == 1) { 674 /* Always compat mode. */ 675 *signo = SIGBUS; 676 *ucode = UCODE_PAGEFLT; 677 } else { 678 /* Always SIGSEGV mode. */ 679 *signo = SIGSEGV; 680 *ucode = SEGV_ACCERR; 681 } 682 break; 683 default: 684 KASSERT(0, ("Unexpected Mach error %d from vm_fault()", 685 result)); 686 break; 687 } 688 } 689 return (result); 690 } 691 692 static int 693 vm_fault_lock_vnode(struct faultstate *fs, bool objlocked) 694 { 695 struct vnode *vp; 696 int error, locked; 697 698 if (fs->object->type != OBJT_VNODE) 699 return (KERN_SUCCESS); 700 vp = fs->object->handle; 701 if (vp == fs->vp) { 702 ASSERT_VOP_LOCKED(vp, "saved vnode is not locked"); 703 return (KERN_SUCCESS); 704 } 705 706 /* 707 * Perform an unlock in case the desired vnode changed while 708 * the map was unlocked during a retry. 709 */ 710 unlock_vp(fs); 711 712 locked = VOP_ISLOCKED(vp); 713 if (locked != LK_EXCLUSIVE) 714 locked = LK_SHARED; 715 716 /* 717 * We must not sleep acquiring the vnode lock while we have 718 * the page exclusive busied or the object's 719 * paging-in-progress count incremented. Otherwise, we could 720 * deadlock. 721 */ 722 error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT); 723 if (error == 0) { 724 fs->vp = vp; 725 return (KERN_SUCCESS); 726 } 727 728 vhold(vp); 729 if (objlocked) 730 unlock_and_deallocate(fs); 731 else 732 fault_deallocate(fs); 733 error = vget(vp, locked | LK_RETRY | LK_CANRECURSE); 734 vdrop(vp); 735 fs->vp = vp; 736 KASSERT(error == 0, ("vm_fault: vget failed %d", error)); 737 return (KERN_RESOURCE_SHORTAGE); 738 } 739 740 /* 741 * Calculate the desired readahead. Handle drop-behind. 742 * 743 * Returns the number of readahead blocks to pass to the pager. 744 */ 745 static int 746 vm_fault_readahead(struct faultstate *fs) 747 { 748 int era, nera; 749 u_char behavior; 750 751 KASSERT(fs->lookup_still_valid, ("map unlocked")); 752 era = fs->entry->read_ahead; 753 behavior = vm_map_entry_behavior(fs->entry); 754 if (behavior == MAP_ENTRY_BEHAV_RANDOM) { 755 nera = 0; 756 } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) { 757 nera = VM_FAULT_READ_AHEAD_MAX; 758 if (fs->vaddr == fs->entry->next_read) 759 vm_fault_dontneed(fs, fs->vaddr, nera); 760 } else if (fs->vaddr == fs->entry->next_read) { 761 /* 762 * This is a sequential fault. Arithmetically 763 * increase the requested number of pages in 764 * the read-ahead window. The requested 765 * number of pages is "# of sequential faults 766 * x (read ahead min + 1) + read ahead min" 767 */ 768 nera = VM_FAULT_READ_AHEAD_MIN; 769 if (era > 0) { 770 nera += era + 1; 771 if (nera > VM_FAULT_READ_AHEAD_MAX) 772 nera = VM_FAULT_READ_AHEAD_MAX; 773 } 774 if (era == VM_FAULT_READ_AHEAD_MAX) 775 vm_fault_dontneed(fs, fs->vaddr, nera); 776 } else { 777 /* 778 * This is a non-sequential fault. 779 */ 780 nera = 0; 781 } 782 if (era != nera) { 783 /* 784 * A read lock on the map suffices to update 785 * the read ahead count safely. 786 */ 787 fs->entry->read_ahead = nera; 788 } 789 790 return (nera); 791 } 792 793 static int 794 vm_fault_lookup(struct faultstate *fs) 795 { 796 int result; 797 798 KASSERT(!fs->lookup_still_valid, 799 ("vm_fault_lookup: Map already locked.")); 800 result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type | 801 VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object, 802 &fs->first_pindex, &fs->prot, &fs->wired); 803 if (result != KERN_SUCCESS) { 804 unlock_vp(fs); 805 return (result); 806 } 807 808 fs->map_generation = fs->map->timestamp; 809 810 if (fs->entry->eflags & MAP_ENTRY_NOFAULT) { 811 panic("%s: fault on nofault entry, addr: %#lx", 812 __func__, (u_long)fs->vaddr); 813 } 814 815 if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION && 816 fs->entry->wiring_thread != curthread) { 817 vm_map_unlock_read(fs->map); 818 vm_map_lock(fs->map); 819 if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) && 820 (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) { 821 unlock_vp(fs); 822 fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 823 vm_map_unlock_and_wait(fs->map, 0); 824 } else 825 vm_map_unlock(fs->map); 826 return (KERN_RESOURCE_SHORTAGE); 827 } 828 829 MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0); 830 831 if (fs->wired) 832 fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY); 833 else 834 KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0, 835 ("!fs->wired && VM_FAULT_WIRE")); 836 fs->lookup_still_valid = true; 837 838 return (KERN_SUCCESS); 839 } 840 841 static int 842 vm_fault_relookup(struct faultstate *fs) 843 { 844 vm_object_t retry_object; 845 vm_pindex_t retry_pindex; 846 vm_prot_t retry_prot; 847 int result; 848 849 if (!vm_map_trylock_read(fs->map)) 850 return (KERN_RESTART); 851 852 fs->lookup_still_valid = true; 853 if (fs->map->timestamp == fs->map_generation) 854 return (KERN_SUCCESS); 855 856 result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type, 857 &fs->entry, &retry_object, &retry_pindex, &retry_prot, 858 &fs->wired); 859 if (result != KERN_SUCCESS) { 860 /* 861 * If retry of map lookup would have blocked then 862 * retry fault from start. 863 */ 864 if (result == KERN_FAILURE) 865 return (KERN_RESTART); 866 return (result); 867 } 868 if (retry_object != fs->first_object || 869 retry_pindex != fs->first_pindex) 870 return (KERN_RESTART); 871 872 /* 873 * Check whether the protection has changed or the object has 874 * been copied while we left the map unlocked. Changing from 875 * read to write permission is OK - we leave the page 876 * write-protected, and catch the write fault. Changing from 877 * write to read permission means that we can't mark the page 878 * write-enabled after all. 879 */ 880 fs->prot &= retry_prot; 881 fs->fault_type &= retry_prot; 882 if (fs->prot == 0) 883 return (KERN_RESTART); 884 885 /* Reassert because wired may have changed. */ 886 KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0, 887 ("!wired && VM_FAULT_WIRE")); 888 889 return (KERN_SUCCESS); 890 } 891 892 static void 893 vm_fault_cow(struct faultstate *fs) 894 { 895 bool is_first_object_locked; 896 897 /* 898 * This allows pages to be virtually copied from a backing_object 899 * into the first_object, where the backing object has no other 900 * refs to it, and cannot gain any more refs. Instead of a bcopy, 901 * we just move the page from the backing object to the first 902 * object. Note that we must mark the page dirty in the first 903 * object so that it will go out to swap when needed. 904 */ 905 is_first_object_locked = false; 906 if ( 907 /* 908 * Only one shadow object and no other refs. 909 */ 910 fs->object->shadow_count == 1 && fs->object->ref_count == 1 && 911 /* 912 * No other ways to look the object up 913 */ 914 fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 && 915 /* 916 * We don't chase down the shadow chain and we can acquire locks. 917 */ 918 (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) && 919 fs->object == fs->first_object->backing_object && 920 VM_OBJECT_TRYWLOCK(fs->object)) { 921 /* 922 * Remove but keep xbusy for replace. fs->m is moved into 923 * fs->first_object and left busy while fs->first_m is 924 * conditionally freed. 925 */ 926 vm_page_remove_xbusy(fs->m); 927 vm_page_replace(fs->m, fs->first_object, fs->first_pindex, 928 fs->first_m); 929 vm_page_dirty(fs->m); 930 #if VM_NRESERVLEVEL > 0 931 /* 932 * Rename the reservation. 933 */ 934 vm_reserv_rename(fs->m, fs->first_object, fs->object, 935 OFF_TO_IDX(fs->first_object->backing_object_offset)); 936 #endif 937 VM_OBJECT_WUNLOCK(fs->object); 938 VM_OBJECT_WUNLOCK(fs->first_object); 939 fs->first_m = fs->m; 940 fs->m = NULL; 941 VM_CNT_INC(v_cow_optim); 942 } else { 943 if (is_first_object_locked) 944 VM_OBJECT_WUNLOCK(fs->first_object); 945 /* 946 * Oh, well, lets copy it. 947 */ 948 pmap_copy_page(fs->m, fs->first_m); 949 vm_page_valid(fs->first_m); 950 if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) { 951 vm_page_wire(fs->first_m); 952 vm_page_unwire(fs->m, PQ_INACTIVE); 953 } 954 /* 955 * Save the cow page to be released after 956 * pmap_enter is complete. 957 */ 958 fs->m_cow = fs->m; 959 fs->m = NULL; 960 } 961 /* 962 * fs->object != fs->first_object due to above 963 * conditional 964 */ 965 vm_object_pip_wakeup(fs->object); 966 967 /* 968 * Only use the new page below... 969 */ 970 fs->object = fs->first_object; 971 fs->pindex = fs->first_pindex; 972 fs->m = fs->first_m; 973 VM_CNT_INC(v_cow_faults); 974 curthread->td_cow++; 975 } 976 977 static bool 978 vm_fault_next(struct faultstate *fs) 979 { 980 vm_object_t next_object; 981 982 /* 983 * The requested page does not exist at this object/ 984 * offset. Remove the invalid page from the object, 985 * waking up anyone waiting for it, and continue on to 986 * the next object. However, if this is the top-level 987 * object, we must leave the busy page in place to 988 * prevent another process from rushing past us, and 989 * inserting the page in that object at the same time 990 * that we are. 991 */ 992 if (fs->object == fs->first_object) { 993 fs->first_m = fs->m; 994 fs->m = NULL; 995 } else 996 fault_page_free(&fs->m); 997 998 /* 999 * Move on to the next object. Lock the next object before 1000 * unlocking the current one. 1001 */ 1002 VM_OBJECT_ASSERT_WLOCKED(fs->object); 1003 next_object = fs->object->backing_object; 1004 if (next_object == NULL) 1005 return (false); 1006 MPASS(fs->first_m != NULL); 1007 KASSERT(fs->object != next_object, ("object loop %p", next_object)); 1008 VM_OBJECT_WLOCK(next_object); 1009 vm_object_pip_add(next_object, 1); 1010 if (fs->object != fs->first_object) 1011 vm_object_pip_wakeup(fs->object); 1012 fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset); 1013 VM_OBJECT_WUNLOCK(fs->object); 1014 fs->object = next_object; 1015 1016 return (true); 1017 } 1018 1019 static void 1020 vm_fault_zerofill(struct faultstate *fs) 1021 { 1022 1023 /* 1024 * If there's no object left, fill the page in the top 1025 * object with zeros. 1026 */ 1027 if (fs->object != fs->first_object) { 1028 vm_object_pip_wakeup(fs->object); 1029 fs->object = fs->first_object; 1030 fs->pindex = fs->first_pindex; 1031 } 1032 MPASS(fs->first_m != NULL); 1033 MPASS(fs->m == NULL); 1034 fs->m = fs->first_m; 1035 fs->first_m = NULL; 1036 1037 /* 1038 * Zero the page if necessary and mark it valid. 1039 */ 1040 if ((fs->m->flags & PG_ZERO) == 0) { 1041 pmap_zero_page(fs->m); 1042 } else { 1043 VM_CNT_INC(v_ozfod); 1044 } 1045 VM_CNT_INC(v_zfod); 1046 vm_page_valid(fs->m); 1047 } 1048 1049 /* 1050 * Allocate a page directly or via the object populate method. 1051 */ 1052 static int 1053 vm_fault_allocate(struct faultstate *fs) 1054 { 1055 struct domainset *dset; 1056 int alloc_req; 1057 int rv; 1058 1059 if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) { 1060 rv = vm_fault_lock_vnode(fs, true); 1061 MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE); 1062 if (rv == KERN_RESOURCE_SHORTAGE) 1063 return (rv); 1064 } 1065 1066 if (fs->pindex >= fs->object->size) 1067 return (KERN_OUT_OF_BOUNDS); 1068 1069 if (fs->object == fs->first_object && 1070 (fs->first_object->flags & OBJ_POPULATE) != 0 && 1071 fs->first_object->shadow_count == 0) { 1072 rv = vm_fault_populate(fs); 1073 switch (rv) { 1074 case KERN_SUCCESS: 1075 case KERN_FAILURE: 1076 case KERN_RESTART: 1077 return (rv); 1078 case KERN_NOT_RECEIVER: 1079 /* 1080 * Pager's populate() method 1081 * returned VM_PAGER_BAD. 1082 */ 1083 break; 1084 default: 1085 panic("inconsistent return codes"); 1086 } 1087 } 1088 1089 /* 1090 * Allocate a new page for this object/offset pair. 1091 * 1092 * Unlocked read of the p_flag is harmless. At worst, the P_KILLED 1093 * might be not observed there, and allocation can fail, causing 1094 * restart and new reading of the p_flag. 1095 */ 1096 dset = fs->object->domain.dr_policy; 1097 if (dset == NULL) 1098 dset = curthread->td_domain.dr_policy; 1099 if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) { 1100 #if VM_NRESERVLEVEL > 0 1101 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex); 1102 #endif 1103 alloc_req = P_KILLED(curproc) ? 1104 VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL; 1105 if (fs->object->type != OBJT_VNODE && 1106 fs->object->backing_object == NULL) 1107 alloc_req |= VM_ALLOC_ZERO; 1108 fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req); 1109 } 1110 if (fs->m == NULL) { 1111 unlock_and_deallocate(fs); 1112 if (vm_pfault_oom_attempts < 0 || 1113 fs->oom < vm_pfault_oom_attempts) { 1114 fs->oom++; 1115 vm_waitpfault(dset, vm_pfault_oom_wait * hz); 1116 } else { 1117 if (bootverbose) 1118 printf( 1119 "proc %d (%s) failed to alloc page on fault, starting OOM\n", 1120 curproc->p_pid, curproc->p_comm); 1121 vm_pageout_oom(VM_OOM_MEM_PF); 1122 fs->oom = 0; 1123 } 1124 return (KERN_RESOURCE_SHORTAGE); 1125 } 1126 fs->oom = 0; 1127 1128 return (KERN_NOT_RECEIVER); 1129 } 1130 1131 /* 1132 * Call the pager to retrieve the page if there is a chance 1133 * that the pager has it, and potentially retrieve additional 1134 * pages at the same time. 1135 */ 1136 static int 1137 vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp) 1138 { 1139 vm_offset_t e_end, e_start; 1140 int ahead, behind, cluster_offset, rv; 1141 u_char behavior; 1142 1143 /* 1144 * Prepare for unlocking the map. Save the map 1145 * entry's start and end addresses, which are used to 1146 * optimize the size of the pager operation below. 1147 * Even if the map entry's addresses change after 1148 * unlocking the map, using the saved addresses is 1149 * safe. 1150 */ 1151 e_start = fs->entry->start; 1152 e_end = fs->entry->end; 1153 behavior = vm_map_entry_behavior(fs->entry); 1154 1155 /* 1156 * Release the map lock before locking the vnode or 1157 * sleeping in the pager. (If the current object has 1158 * a shadow, then an earlier iteration of this loop 1159 * may have already unlocked the map.) 1160 */ 1161 unlock_map(fs); 1162 1163 rv = vm_fault_lock_vnode(fs, false); 1164 MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE); 1165 if (rv == KERN_RESOURCE_SHORTAGE) 1166 return (rv); 1167 KASSERT(fs->vp == NULL || !fs->map->system_map, 1168 ("vm_fault: vnode-backed object mapped by system map")); 1169 1170 /* 1171 * Page in the requested page and hint the pager, 1172 * that it may bring up surrounding pages. 1173 */ 1174 if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM || 1175 P_KILLED(curproc)) { 1176 behind = 0; 1177 ahead = 0; 1178 } else { 1179 /* Is this a sequential fault? */ 1180 if (nera > 0) { 1181 behind = 0; 1182 ahead = nera; 1183 } else { 1184 /* 1185 * Request a cluster of pages that is 1186 * aligned to a VM_FAULT_READ_DEFAULT 1187 * page offset boundary within the 1188 * object. Alignment to a page offset 1189 * boundary is more likely to coincide 1190 * with the underlying file system 1191 * block than alignment to a virtual 1192 * address boundary. 1193 */ 1194 cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT; 1195 behind = ulmin(cluster_offset, 1196 atop(fs->vaddr - e_start)); 1197 ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset; 1198 } 1199 ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1); 1200 } 1201 *behindp = behind; 1202 *aheadp = ahead; 1203 rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp); 1204 if (rv == VM_PAGER_OK) 1205 return (KERN_SUCCESS); 1206 if (rv == VM_PAGER_ERROR) 1207 printf("vm_fault: pager read error, pid %d (%s)\n", 1208 curproc->p_pid, curproc->p_comm); 1209 /* 1210 * If an I/O error occurred or the requested page was 1211 * outside the range of the pager, clean up and return 1212 * an error. 1213 */ 1214 if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) 1215 return (KERN_OUT_OF_BOUNDS); 1216 return (KERN_NOT_RECEIVER); 1217 } 1218 1219 /* 1220 * Wait/Retry if the page is busy. We have to do this if the page is 1221 * either exclusive or shared busy because the vm_pager may be using 1222 * read busy for pageouts (and even pageins if it is the vnode pager), 1223 * and we could end up trying to pagein and pageout the same page 1224 * simultaneously. 1225 * 1226 * We can theoretically allow the busy case on a read fault if the page 1227 * is marked valid, but since such pages are typically already pmap'd, 1228 * putting that special case in might be more effort then it is worth. 1229 * We cannot under any circumstances mess around with a shared busied 1230 * page except, perhaps, to pmap it. 1231 */ 1232 static void 1233 vm_fault_busy_sleep(struct faultstate *fs) 1234 { 1235 /* 1236 * Reference the page before unlocking and 1237 * sleeping so that the page daemon is less 1238 * likely to reclaim it. 1239 */ 1240 vm_page_aflag_set(fs->m, PGA_REFERENCED); 1241 if (fs->object != fs->first_object) { 1242 fault_page_release(&fs->first_m); 1243 vm_object_pip_wakeup(fs->first_object); 1244 } 1245 vm_object_pip_wakeup(fs->object); 1246 unlock_map(fs); 1247 if (fs->m == vm_page_lookup(fs->object, fs->pindex)) 1248 vm_page_busy_sleep(fs->m, "vmpfw", false); 1249 else 1250 VM_OBJECT_WUNLOCK(fs->object); 1251 VM_CNT_INC(v_intrans); 1252 vm_object_deallocate(fs->first_object); 1253 } 1254 1255 int 1256 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1257 int fault_flags, vm_page_t *m_hold) 1258 { 1259 struct faultstate fs; 1260 int ahead, behind, faultcount; 1261 int nera, result, rv; 1262 bool dead, hardfault; 1263 1264 VM_CNT_INC(v_vm_faults); 1265 1266 if ((curthread->td_pflags & TDP_NOFAULTING) != 0) 1267 return (KERN_PROTECTION_FAILURE); 1268 1269 fs.vp = NULL; 1270 fs.vaddr = vaddr; 1271 fs.m_hold = m_hold; 1272 fs.fault_flags = fault_flags; 1273 fs.map = map; 1274 fs.lookup_still_valid = false; 1275 fs.oom = 0; 1276 faultcount = 0; 1277 nera = -1; 1278 hardfault = false; 1279 1280 RetryFault: 1281 fs.fault_type = fault_type; 1282 1283 /* 1284 * Find the backing store object and offset into it to begin the 1285 * search. 1286 */ 1287 result = vm_fault_lookup(&fs); 1288 if (result != KERN_SUCCESS) { 1289 if (result == KERN_RESOURCE_SHORTAGE) 1290 goto RetryFault; 1291 return (result); 1292 } 1293 1294 /* 1295 * Try to avoid lock contention on the top-level object through 1296 * special-case handling of some types of page faults, specifically, 1297 * those that are mapping an existing page from the top-level object. 1298 * Under this condition, a read lock on the object suffices, allowing 1299 * multiple page faults of a similar type to run in parallel. 1300 */ 1301 if (fs.vp == NULL /* avoid locked vnode leak */ && 1302 (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 && 1303 (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) { 1304 VM_OBJECT_RLOCK(fs.first_object); 1305 rv = vm_fault_soft_fast(&fs); 1306 if (rv == KERN_SUCCESS) 1307 return (rv); 1308 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { 1309 VM_OBJECT_RUNLOCK(fs.first_object); 1310 VM_OBJECT_WLOCK(fs.first_object); 1311 } 1312 } else { 1313 VM_OBJECT_WLOCK(fs.first_object); 1314 } 1315 1316 /* 1317 * Make a reference to this object to prevent its disposal while we 1318 * are messing with it. Once we have the reference, the map is free 1319 * to be diddled. Since objects reference their shadows (and copies), 1320 * they will stay around as well. 1321 * 1322 * Bump the paging-in-progress count to prevent size changes (e.g. 1323 * truncation operations) during I/O. 1324 */ 1325 vm_object_reference_locked(fs.first_object); 1326 vm_object_pip_add(fs.first_object, 1); 1327 1328 fs.m_cow = fs.m = fs.first_m = NULL; 1329 1330 /* 1331 * Search for the page at object/offset. 1332 */ 1333 fs.object = fs.first_object; 1334 fs.pindex = fs.first_pindex; 1335 1336 if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) { 1337 rv = vm_fault_allocate(&fs); 1338 switch (rv) { 1339 case KERN_RESTART: 1340 unlock_and_deallocate(&fs); 1341 /* FALLTHROUGH */ 1342 case KERN_RESOURCE_SHORTAGE: 1343 goto RetryFault; 1344 case KERN_SUCCESS: 1345 case KERN_FAILURE: 1346 case KERN_OUT_OF_BOUNDS: 1347 unlock_and_deallocate(&fs); 1348 return (rv); 1349 case KERN_NOT_RECEIVER: 1350 break; 1351 default: 1352 panic("vm_fault: Unhandled rv %d", rv); 1353 } 1354 } 1355 1356 while (TRUE) { 1357 KASSERT(fs.m == NULL, 1358 ("page still set %p at loop start", fs.m)); 1359 /* 1360 * If the object is marked for imminent termination, 1361 * we retry here, since the collapse pass has raced 1362 * with us. Otherwise, if we see terminally dead 1363 * object, return fail. 1364 */ 1365 if ((fs.object->flags & OBJ_DEAD) != 0) { 1366 dead = fs.object->type == OBJT_DEAD; 1367 unlock_and_deallocate(&fs); 1368 if (dead) 1369 return (KERN_PROTECTION_FAILURE); 1370 pause("vmf_de", 1); 1371 goto RetryFault; 1372 } 1373 1374 /* 1375 * See if page is resident 1376 */ 1377 fs.m = vm_page_lookup(fs.object, fs.pindex); 1378 if (fs.m != NULL) { 1379 if (vm_page_tryxbusy(fs.m) == 0) { 1380 vm_fault_busy_sleep(&fs); 1381 goto RetryFault; 1382 } 1383 1384 /* 1385 * The page is marked busy for other processes and the 1386 * pagedaemon. If it still is completely valid we 1387 * are done. 1388 */ 1389 if (vm_page_all_valid(fs.m)) { 1390 VM_OBJECT_WUNLOCK(fs.object); 1391 break; /* break to PAGE HAS BEEN FOUND. */ 1392 } 1393 } 1394 VM_OBJECT_ASSERT_WLOCKED(fs.object); 1395 1396 /* 1397 * Page is not resident. If the pager might contain the page 1398 * or this is the beginning of the search, allocate a new 1399 * page. (Default objects are zero-fill, so there is no real 1400 * pager for them.) 1401 */ 1402 if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT || 1403 fs.object == fs.first_object)) { 1404 rv = vm_fault_allocate(&fs); 1405 switch (rv) { 1406 case KERN_RESTART: 1407 unlock_and_deallocate(&fs); 1408 /* FALLTHROUGH */ 1409 case KERN_RESOURCE_SHORTAGE: 1410 goto RetryFault; 1411 case KERN_SUCCESS: 1412 case KERN_FAILURE: 1413 case KERN_OUT_OF_BOUNDS: 1414 unlock_and_deallocate(&fs); 1415 return (rv); 1416 case KERN_NOT_RECEIVER: 1417 break; 1418 default: 1419 panic("vm_fault: Unhandled rv %d", rv); 1420 } 1421 } 1422 1423 /* 1424 * Default objects have no pager so no exclusive busy exists 1425 * to protect this page in the chain. Skip to the next 1426 * object without dropping the lock to preserve atomicity of 1427 * shadow faults. 1428 */ 1429 if (fs.object->type != OBJT_DEFAULT) { 1430 /* 1431 * At this point, we have either allocated a new page 1432 * or found an existing page that is only partially 1433 * valid. 1434 * 1435 * We hold a reference on the current object and the 1436 * page is exclusive busied. The exclusive busy 1437 * prevents simultaneous faults and collapses while 1438 * the object lock is dropped. 1439 */ 1440 VM_OBJECT_WUNLOCK(fs.object); 1441 1442 /* 1443 * If the pager for the current object might have 1444 * the page, then determine the number of additional 1445 * pages to read and potentially reprioritize 1446 * previously read pages for earlier reclamation. 1447 * These operations should only be performed once per 1448 * page fault. Even if the current pager doesn't 1449 * have the page, the number of additional pages to 1450 * read will apply to subsequent objects in the 1451 * shadow chain. 1452 */ 1453 if (nera == -1 && !P_KILLED(curproc)) 1454 nera = vm_fault_readahead(&fs); 1455 1456 rv = vm_fault_getpages(&fs, nera, &behind, &ahead); 1457 if (rv == KERN_SUCCESS) { 1458 faultcount = behind + 1 + ahead; 1459 hardfault = true; 1460 break; /* break to PAGE HAS BEEN FOUND. */ 1461 } 1462 if (rv == KERN_RESOURCE_SHORTAGE) 1463 goto RetryFault; 1464 VM_OBJECT_WLOCK(fs.object); 1465 if (rv == KERN_OUT_OF_BOUNDS) { 1466 fault_page_free(&fs.m); 1467 unlock_and_deallocate(&fs); 1468 return (rv); 1469 } 1470 } 1471 1472 /* 1473 * The page was not found in the current object. Try to 1474 * traverse into a backing object or zero fill if none is 1475 * found. 1476 */ 1477 if (vm_fault_next(&fs)) 1478 continue; 1479 VM_OBJECT_WUNLOCK(fs.object); 1480 vm_fault_zerofill(&fs); 1481 /* Don't try to prefault neighboring pages. */ 1482 faultcount = 1; 1483 break; /* break to PAGE HAS BEEN FOUND. */ 1484 } 1485 1486 /* 1487 * PAGE HAS BEEN FOUND. A valid page has been found and exclusively 1488 * busied. The object lock must no longer be held. 1489 */ 1490 vm_page_assert_xbusied(fs.m); 1491 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1492 1493 /* 1494 * If the page is being written, but isn't already owned by the 1495 * top-level object, we have to copy it into a new page owned by the 1496 * top-level object. 1497 */ 1498 if (fs.object != fs.first_object) { 1499 /* 1500 * We only really need to copy if we want to write it. 1501 */ 1502 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1503 vm_fault_cow(&fs); 1504 /* 1505 * We only try to prefault read-only mappings to the 1506 * neighboring pages when this copy-on-write fault is 1507 * a hard fault. In other cases, trying to prefault 1508 * is typically wasted effort. 1509 */ 1510 if (faultcount == 0) 1511 faultcount = 1; 1512 1513 } else { 1514 fs.prot &= ~VM_PROT_WRITE; 1515 } 1516 } 1517 1518 /* 1519 * We must verify that the maps have not changed since our last 1520 * lookup. 1521 */ 1522 if (!fs.lookup_still_valid) { 1523 result = vm_fault_relookup(&fs); 1524 if (result != KERN_SUCCESS) { 1525 fault_deallocate(&fs); 1526 if (result == KERN_RESTART) 1527 goto RetryFault; 1528 return (result); 1529 } 1530 } 1531 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1532 1533 /* 1534 * If the page was filled by a pager, save the virtual address that 1535 * should be faulted on next under a sequential access pattern to the 1536 * map entry. A read lock on the map suffices to update this address 1537 * safely. 1538 */ 1539 if (hardfault) 1540 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1541 1542 /* 1543 * Page must be completely valid or it is not fit to 1544 * map into user space. vm_pager_get_pages() ensures this. 1545 */ 1546 vm_page_assert_xbusied(fs.m); 1547 KASSERT(vm_page_all_valid(fs.m), 1548 ("vm_fault: page %p partially invalid", fs.m)); 1549 1550 vm_fault_dirty(&fs, fs.m); 1551 1552 /* 1553 * Put this page into the physical map. We had to do the unlock above 1554 * because pmap_enter() may sleep. We don't put the page 1555 * back on the active queue until later so that the pageout daemon 1556 * won't find it (yet). 1557 */ 1558 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, 1559 fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); 1560 if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && 1561 fs.wired == 0) 1562 vm_fault_prefault(&fs, vaddr, 1563 faultcount > 0 ? behind : PFBAK, 1564 faultcount > 0 ? ahead : PFFOR, false); 1565 1566 /* 1567 * If the page is not wired down, then put it where the pageout daemon 1568 * can find it. 1569 */ 1570 if ((fs.fault_flags & VM_FAULT_WIRE) != 0) 1571 vm_page_wire(fs.m); 1572 else 1573 vm_page_activate(fs.m); 1574 if (fs.m_hold != NULL) { 1575 (*fs.m_hold) = fs.m; 1576 vm_page_wire(fs.m); 1577 } 1578 vm_page_xunbusy(fs.m); 1579 fs.m = NULL; 1580 1581 /* 1582 * Unlock everything, and return 1583 */ 1584 fault_deallocate(&fs); 1585 if (hardfault) { 1586 VM_CNT_INC(v_io_faults); 1587 curthread->td_ru.ru_majflt++; 1588 #ifdef RACCT 1589 if (racct_enable && fs.object->type == OBJT_VNODE) { 1590 PROC_LOCK(curproc); 1591 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1592 racct_add_force(curproc, RACCT_WRITEBPS, 1593 PAGE_SIZE + behind * PAGE_SIZE); 1594 racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1595 } else { 1596 racct_add_force(curproc, RACCT_READBPS, 1597 PAGE_SIZE + ahead * PAGE_SIZE); 1598 racct_add_force(curproc, RACCT_READIOPS, 1); 1599 } 1600 PROC_UNLOCK(curproc); 1601 } 1602 #endif 1603 } else 1604 curthread->td_ru.ru_minflt++; 1605 1606 return (KERN_SUCCESS); 1607 } 1608 1609 /* 1610 * Speed up the reclamation of pages that precede the faulting pindex within 1611 * the first object of the shadow chain. Essentially, perform the equivalent 1612 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1613 * the faulting pindex by the cluster size when the pages read by vm_fault() 1614 * cross a cluster-size boundary. The cluster size is the greater of the 1615 * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1616 * 1617 * When "fs->first_object" is a shadow object, the pages in the backing object 1618 * that precede the faulting pindex are deactivated by vm_fault(). So, this 1619 * function must only be concerned with pages in the first object. 1620 */ 1621 static void 1622 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 1623 { 1624 vm_map_entry_t entry; 1625 vm_object_t first_object, object; 1626 vm_offset_t end, start; 1627 vm_page_t m, m_next; 1628 vm_pindex_t pend, pstart; 1629 vm_size_t size; 1630 1631 object = fs->object; 1632 VM_OBJECT_ASSERT_UNLOCKED(object); 1633 first_object = fs->first_object; 1634 /* Neither fictitious nor unmanaged pages can be reclaimed. */ 1635 if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { 1636 VM_OBJECT_RLOCK(first_object); 1637 size = VM_FAULT_DONTNEED_MIN; 1638 if (MAXPAGESIZES > 1 && size < pagesizes[1]) 1639 size = pagesizes[1]; 1640 end = rounddown2(vaddr, size); 1641 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && 1642 (entry = fs->entry)->start < end) { 1643 if (end - entry->start < size) 1644 start = entry->start; 1645 else 1646 start = end - size; 1647 pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); 1648 pstart = OFF_TO_IDX(entry->offset) + atop(start - 1649 entry->start); 1650 m_next = vm_page_find_least(first_object, pstart); 1651 pend = OFF_TO_IDX(entry->offset) + atop(end - 1652 entry->start); 1653 while ((m = m_next) != NULL && m->pindex < pend) { 1654 m_next = TAILQ_NEXT(m, listq); 1655 if (!vm_page_all_valid(m) || 1656 vm_page_busied(m)) 1657 continue; 1658 1659 /* 1660 * Don't clear PGA_REFERENCED, since it would 1661 * likely represent a reference by a different 1662 * process. 1663 * 1664 * Typically, at this point, prefetched pages 1665 * are still in the inactive queue. Only 1666 * pages that triggered page faults are in the 1667 * active queue. The test for whether the page 1668 * is in the inactive queue is racy; in the 1669 * worst case we will requeue the page 1670 * unnecessarily. 1671 */ 1672 if (!vm_page_inactive(m)) 1673 vm_page_deactivate(m); 1674 } 1675 } 1676 VM_OBJECT_RUNLOCK(first_object); 1677 } 1678 } 1679 1680 /* 1681 * vm_fault_prefault provides a quick way of clustering 1682 * pagefaults into a processes address space. It is a "cousin" 1683 * of vm_map_pmap_enter, except it runs at page fault time instead 1684 * of mmap time. 1685 */ 1686 static void 1687 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 1688 int backward, int forward, bool obj_locked) 1689 { 1690 pmap_t pmap; 1691 vm_map_entry_t entry; 1692 vm_object_t backing_object, lobject; 1693 vm_offset_t addr, starta; 1694 vm_pindex_t pindex; 1695 vm_page_t m; 1696 int i; 1697 1698 pmap = fs->map->pmap; 1699 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) 1700 return; 1701 1702 entry = fs->entry; 1703 1704 if (addra < backward * PAGE_SIZE) { 1705 starta = entry->start; 1706 } else { 1707 starta = addra - backward * PAGE_SIZE; 1708 if (starta < entry->start) 1709 starta = entry->start; 1710 } 1711 1712 /* 1713 * Generate the sequence of virtual addresses that are candidates for 1714 * prefaulting in an outward spiral from the faulting virtual address, 1715 * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra 1716 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... 1717 * If the candidate address doesn't have a backing physical page, then 1718 * the loop immediately terminates. 1719 */ 1720 for (i = 0; i < 2 * imax(backward, forward); i++) { 1721 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE : 1722 PAGE_SIZE); 1723 if (addr > addra + forward * PAGE_SIZE) 1724 addr = 0; 1725 1726 if (addr < starta || addr >= entry->end) 1727 continue; 1728 1729 if (!pmap_is_prefaultable(pmap, addr)) 1730 continue; 1731 1732 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 1733 lobject = entry->object.vm_object; 1734 if (!obj_locked) 1735 VM_OBJECT_RLOCK(lobject); 1736 while ((m = vm_page_lookup(lobject, pindex)) == NULL && 1737 lobject->type == OBJT_DEFAULT && 1738 (backing_object = lobject->backing_object) != NULL) { 1739 KASSERT((lobject->backing_object_offset & PAGE_MASK) == 1740 0, ("vm_fault_prefault: unaligned object offset")); 1741 pindex += lobject->backing_object_offset >> PAGE_SHIFT; 1742 VM_OBJECT_RLOCK(backing_object); 1743 if (!obj_locked || lobject != entry->object.vm_object) 1744 VM_OBJECT_RUNLOCK(lobject); 1745 lobject = backing_object; 1746 } 1747 if (m == NULL) { 1748 if (!obj_locked || lobject != entry->object.vm_object) 1749 VM_OBJECT_RUNLOCK(lobject); 1750 break; 1751 } 1752 if (vm_page_all_valid(m) && 1753 (m->flags & PG_FICTITIOUS) == 0) 1754 pmap_enter_quick(pmap, addr, m, entry->protection); 1755 if (!obj_locked || lobject != entry->object.vm_object) 1756 VM_OBJECT_RUNLOCK(lobject); 1757 } 1758 } 1759 1760 /* 1761 * Hold each of the physical pages that are mapped by the specified range of 1762 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid 1763 * and allow the specified types of access, "prot". If all of the implied 1764 * pages are successfully held, then the number of held pages is returned 1765 * together with pointers to those pages in the array "ma". However, if any 1766 * of the pages cannot be held, -1 is returned. 1767 */ 1768 int 1769 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, 1770 vm_prot_t prot, vm_page_t *ma, int max_count) 1771 { 1772 vm_offset_t end, va; 1773 vm_page_t *mp; 1774 int count; 1775 boolean_t pmap_failed; 1776 1777 if (len == 0) 1778 return (0); 1779 end = round_page(addr + len); 1780 addr = trunc_page(addr); 1781 1782 if (!vm_map_range_valid(map, addr, end)) 1783 return (-1); 1784 1785 if (atop(end - addr) > max_count) 1786 panic("vm_fault_quick_hold_pages: count > max_count"); 1787 count = atop(end - addr); 1788 1789 /* 1790 * Most likely, the physical pages are resident in the pmap, so it is 1791 * faster to try pmap_extract_and_hold() first. 1792 */ 1793 pmap_failed = FALSE; 1794 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { 1795 *mp = pmap_extract_and_hold(map->pmap, va, prot); 1796 if (*mp == NULL) 1797 pmap_failed = TRUE; 1798 else if ((prot & VM_PROT_WRITE) != 0 && 1799 (*mp)->dirty != VM_PAGE_BITS_ALL) { 1800 /* 1801 * Explicitly dirty the physical page. Otherwise, the 1802 * caller's changes may go unnoticed because they are 1803 * performed through an unmanaged mapping or by a DMA 1804 * operation. 1805 * 1806 * The object lock is not held here. 1807 * See vm_page_clear_dirty_mask(). 1808 */ 1809 vm_page_dirty(*mp); 1810 } 1811 } 1812 if (pmap_failed) { 1813 /* 1814 * One or more pages could not be held by the pmap. Either no 1815 * page was mapped at the specified virtual address or that 1816 * mapping had insufficient permissions. Attempt to fault in 1817 * and hold these pages. 1818 * 1819 * If vm_fault_disable_pagefaults() was called, 1820 * i.e., TDP_NOFAULTING is set, we must not sleep nor 1821 * acquire MD VM locks, which means we must not call 1822 * vm_fault(). Some (out of tree) callers mark 1823 * too wide a code area with vm_fault_disable_pagefaults() 1824 * already, use the VM_PROT_QUICK_NOFAULT flag to request 1825 * the proper behaviour explicitly. 1826 */ 1827 if ((prot & VM_PROT_QUICK_NOFAULT) != 0 && 1828 (curthread->td_pflags & TDP_NOFAULTING) != 0) 1829 goto error; 1830 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) 1831 if (*mp == NULL && vm_fault(map, va, prot, 1832 VM_FAULT_NORMAL, mp) != KERN_SUCCESS) 1833 goto error; 1834 } 1835 return (count); 1836 error: 1837 for (mp = ma; mp < ma + count; mp++) 1838 if (*mp != NULL) 1839 vm_page_unwire(*mp, PQ_INACTIVE); 1840 return (-1); 1841 } 1842 1843 /* 1844 * Routine: 1845 * vm_fault_copy_entry 1846 * Function: 1847 * Create new shadow object backing dst_entry with private copy of 1848 * all underlying pages. When src_entry is equal to dst_entry, 1849 * function implements COW for wired-down map entry. Otherwise, 1850 * it forks wired entry into dst_map. 1851 * 1852 * In/out conditions: 1853 * The source and destination maps must be locked for write. 1854 * The source map entry must be wired down (or be a sharing map 1855 * entry corresponding to a main map entry that is wired down). 1856 */ 1857 void 1858 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 1859 vm_map_entry_t dst_entry, vm_map_entry_t src_entry, 1860 vm_ooffset_t *fork_charge) 1861 { 1862 vm_object_t backing_object, dst_object, object, src_object; 1863 vm_pindex_t dst_pindex, pindex, src_pindex; 1864 vm_prot_t access, prot; 1865 vm_offset_t vaddr; 1866 vm_page_t dst_m; 1867 vm_page_t src_m; 1868 boolean_t upgrade; 1869 1870 #ifdef lint 1871 src_map++; 1872 #endif /* lint */ 1873 1874 upgrade = src_entry == dst_entry; 1875 access = prot = dst_entry->protection; 1876 1877 src_object = src_entry->object.vm_object; 1878 src_pindex = OFF_TO_IDX(src_entry->offset); 1879 1880 if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 1881 dst_object = src_object; 1882 vm_object_reference(dst_object); 1883 } else { 1884 /* 1885 * Create the top-level object for the destination entry. 1886 * Doesn't actually shadow anything - we copy the pages 1887 * directly. 1888 */ 1889 dst_object = vm_object_allocate_anon(atop(dst_entry->end - 1890 dst_entry->start), NULL, NULL, 0); 1891 #if VM_NRESERVLEVEL > 0 1892 dst_object->flags |= OBJ_COLORED; 1893 dst_object->pg_color = atop(dst_entry->start); 1894 #endif 1895 dst_object->domain = src_object->domain; 1896 dst_object->charge = dst_entry->end - dst_entry->start; 1897 } 1898 1899 VM_OBJECT_WLOCK(dst_object); 1900 KASSERT(upgrade || dst_entry->object.vm_object == NULL, 1901 ("vm_fault_copy_entry: vm_object not NULL")); 1902 if (src_object != dst_object) { 1903 dst_entry->object.vm_object = dst_object; 1904 dst_entry->offset = 0; 1905 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC; 1906 } 1907 if (fork_charge != NULL) { 1908 KASSERT(dst_entry->cred == NULL, 1909 ("vm_fault_copy_entry: leaked swp charge")); 1910 dst_object->cred = curthread->td_ucred; 1911 crhold(dst_object->cred); 1912 *fork_charge += dst_object->charge; 1913 } else if ((dst_object->type == OBJT_DEFAULT || 1914 dst_object->type == OBJT_SWAP) && 1915 dst_object->cred == NULL) { 1916 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 1917 dst_entry)); 1918 dst_object->cred = dst_entry->cred; 1919 dst_entry->cred = NULL; 1920 } 1921 1922 /* 1923 * If not an upgrade, then enter the mappings in the pmap as 1924 * read and/or execute accesses. Otherwise, enter them as 1925 * write accesses. 1926 * 1927 * A writeable large page mapping is only created if all of 1928 * the constituent small page mappings are modified. Marking 1929 * PTEs as modified on inception allows promotion to happen 1930 * without taking potentially large number of soft faults. 1931 */ 1932 if (!upgrade) 1933 access &= ~VM_PROT_WRITE; 1934 1935 /* 1936 * Loop through all of the virtual pages within the entry's 1937 * range, copying each page from the source object to the 1938 * destination object. Since the source is wired, those pages 1939 * must exist. In contrast, the destination is pageable. 1940 * Since the destination object doesn't share any backing storage 1941 * with the source object, all of its pages must be dirtied, 1942 * regardless of whether they can be written. 1943 */ 1944 for (vaddr = dst_entry->start, dst_pindex = 0; 1945 vaddr < dst_entry->end; 1946 vaddr += PAGE_SIZE, dst_pindex++) { 1947 again: 1948 /* 1949 * Find the page in the source object, and copy it in. 1950 * Because the source is wired down, the page will be 1951 * in memory. 1952 */ 1953 if (src_object != dst_object) 1954 VM_OBJECT_RLOCK(src_object); 1955 object = src_object; 1956 pindex = src_pindex + dst_pindex; 1957 while ((src_m = vm_page_lookup(object, pindex)) == NULL && 1958 (backing_object = object->backing_object) != NULL) { 1959 /* 1960 * Unless the source mapping is read-only or 1961 * it is presently being upgraded from 1962 * read-only, the first object in the shadow 1963 * chain should provide all of the pages. In 1964 * other words, this loop body should never be 1965 * executed when the source mapping is already 1966 * read/write. 1967 */ 1968 KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || 1969 upgrade, 1970 ("vm_fault_copy_entry: main object missing page")); 1971 1972 VM_OBJECT_RLOCK(backing_object); 1973 pindex += OFF_TO_IDX(object->backing_object_offset); 1974 if (object != dst_object) 1975 VM_OBJECT_RUNLOCK(object); 1976 object = backing_object; 1977 } 1978 KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); 1979 1980 if (object != dst_object) { 1981 /* 1982 * Allocate a page in the destination object. 1983 */ 1984 dst_m = vm_page_alloc(dst_object, (src_object == 1985 dst_object ? src_pindex : 0) + dst_pindex, 1986 VM_ALLOC_NORMAL); 1987 if (dst_m == NULL) { 1988 VM_OBJECT_WUNLOCK(dst_object); 1989 VM_OBJECT_RUNLOCK(object); 1990 vm_wait(dst_object); 1991 VM_OBJECT_WLOCK(dst_object); 1992 goto again; 1993 } 1994 pmap_copy_page(src_m, dst_m); 1995 VM_OBJECT_RUNLOCK(object); 1996 dst_m->dirty = dst_m->valid = src_m->valid; 1997 } else { 1998 dst_m = src_m; 1999 if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0) 2000 goto again; 2001 if (dst_m->pindex >= dst_object->size) { 2002 /* 2003 * We are upgrading. Index can occur 2004 * out of bounds if the object type is 2005 * vnode and the file was truncated. 2006 */ 2007 vm_page_xunbusy(dst_m); 2008 break; 2009 } 2010 } 2011 VM_OBJECT_WUNLOCK(dst_object); 2012 2013 /* 2014 * Enter it in the pmap. If a wired, copy-on-write 2015 * mapping is being replaced by a write-enabled 2016 * mapping, then wire that new mapping. 2017 * 2018 * The page can be invalid if the user called 2019 * msync(MS_INVALIDATE) or truncated the backing vnode 2020 * or shared memory object. In this case, do not 2021 * insert it into pmap, but still do the copy so that 2022 * all copies of the wired map entry have similar 2023 * backing pages. 2024 */ 2025 if (vm_page_all_valid(dst_m)) { 2026 pmap_enter(dst_map->pmap, vaddr, dst_m, prot, 2027 access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); 2028 } 2029 2030 /* 2031 * Mark it no longer busy, and put it on the active list. 2032 */ 2033 VM_OBJECT_WLOCK(dst_object); 2034 2035 if (upgrade) { 2036 if (src_m != dst_m) { 2037 vm_page_unwire(src_m, PQ_INACTIVE); 2038 vm_page_wire(dst_m); 2039 } else { 2040 KASSERT(vm_page_wired(dst_m), 2041 ("dst_m %p is not wired", dst_m)); 2042 } 2043 } else { 2044 vm_page_activate(dst_m); 2045 } 2046 vm_page_xunbusy(dst_m); 2047 } 2048 VM_OBJECT_WUNLOCK(dst_object); 2049 if (upgrade) { 2050 dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); 2051 vm_object_deallocate(src_object); 2052 } 2053 } 2054 2055 /* 2056 * Block entry into the machine-independent layer's page fault handler by 2057 * the calling thread. Subsequent calls to vm_fault() by that thread will 2058 * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of 2059 * spurious page faults. 2060 */ 2061 int 2062 vm_fault_disable_pagefaults(void) 2063 { 2064 2065 return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); 2066 } 2067 2068 void 2069 vm_fault_enable_pagefaults(int save) 2070 { 2071 2072 curthread_pflags_restore(save); 2073 } 2074