/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
 */

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;
	bool		can_read_lock;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;
	bool		m_needs_zeroing;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 10000,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

enum fault_next_status {
	FAULT_NEXT_GOTOBJ = 1,
	FAULT_NEXT_NOOBJ,
	FAULT_NEXT_RESTART,
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
vm_fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
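		 *
		 * The page is then unbusied with whichever of xunbusy or
		 * sunbusy matches how it was busied, and the caller's
		 * reference to it is cleared.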
		 */
		vm_page_deactivate(m);
		if (vm_page_xbusied(m))
			vm_page_xunbusy(m);
		else
			vm_page_sunbusy(m);
		*mp = NULL;
	}
}

static inline void
vm_fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

/*
 * Return true if a vm_pager_get_pages() call is needed in order to check
 * whether the pager might have a particular page, false if it can be
 * determined immediately that the pager cannot have a copy.  For swap
 * objects, this can be checked quickly.
 */
static inline bool
vm_fault_object_needs_getpages(vm_object_t object)
{
	VM_OBJECT_ASSERT_LOCKED(object);

	return ((object->flags & OBJ_SWAP) == 0 ||
	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
}

static inline void
vm_fault_unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
vm_fault_unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static bool
vm_fault_might_be_cow(struct faultstate *fs)
{
	return (fs->object != fs->first_object);
}

static void
vm_fault_deallocate(struct faultstate *fs)
{

	fs->m_needs_zeroing = true;
	vm_fault_page_release(&fs->m_cow);
	vm_fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (vm_fault_might_be_cow(fs)) {
		VM_OBJECT_WLOCK(fs->first_object);
		vm_fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	vm_fault_unlock_map(fs);
	vm_fault_unlock_vp(fs);
}

static void
vm_fault_unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_UNLOCK(fs->object);
	vm_fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}

}

static bool
vm_fault_is_read(const struct faultstate *fs)
{
	return ((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) == 0);
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);

	/*
	 * If we fail, the vast majority of the time it is because the page is
	 * not there to begin with.  Opportunistically perform the lookup and
	 * subsequent checks without the object lock, and revalidate later.
	 *
	 * Note: a busy page can be mapped for read|execute access.
	 */
	m = vm_page_lookup_unlocked(fs->first_object, fs->first_pindex);
	if (m == NULL || !vm_page_all_valid(m) ||
	    ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m))) {
		VM_OBJECT_WLOCK(fs->first_object);
		return (FAULT_FAILURE);
	}

	vaddr = fs->vaddr;

	VM_OBJECT_RLOCK(fs->first_object);

	/*
	 * Now that we have stabilized the state, revalidate that the page is
	 * in the state we encountered above.
	 */

	if (m->object != fs->first_object || m->pindex != fs->first_pindex)
		goto fail;

	vm_object_busy(fs->first_object);

	if (!vm_page_all_valid(m) ||
	    ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)))
		goto fail_busy;

	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL) {
		psind = m_super->psind;
		KASSERT(psind > 0,
		    ("psind %d of m_super %p < 1", psind, m_super));
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		while (rounddown2(vaddr, pagesizes[psind]) < fs->entry->start ||
		    roundup2(vaddr + 1, pagesizes[psind]) > fs->entry->end ||
		    (vaddr & (pagesizes[psind] - 1)) !=
		    (VM_PAGE_TO_PHYS(m) & (pagesizes[psind] - 1)) ||
		    !vm_page_ps_test(m_super, psind, flags, m) ||
		    !pmap_ps_enabled(fs->map->pmap)) {
			psind--;
			if (psind == 0)
				break;
			m_super += rounddown2(m - m_super,
			    atop(pagesizes[psind]));
			KASSERT(m_super->psind >= psind,
			    ("psind %d of m_super %p < %d", m_super->psind,
			    m_super, psind));
		}
		if (psind > 0) {
			m_map = m_super;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
	    KERN_SUCCESS)
		goto fail_busy;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_object_unbusy(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;
	return (FAULT_SUCCESS);
fail_busy:
	vm_object_unbusy(fs->first_object);
fail:
	if (!VM_OBJECT_TRYUPGRADE(fs->first_object)) {
		VM_OBJECT_RUNLOCK(fs->first_object);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	return (FAULT_FAILURE);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	struct pctrie_iter pages;
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	vm_page_iter_limit_init(&pages, object, last + 1);
	VM_RADIX_FORALL_FROM(m, &pages, first) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
	KASSERT(pages.index == last,
	    ("%s: Object %p first %#jx last %#jx index %#jx",
	    __func__, object, (uintmax_t)first, (uintmax_t)last,
	    (uintmax_t)pages.index));
}

static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;
	enum fault_status res;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	vm_fault_unlock_map(fs);
	vm_fault_unlock_vp(fs);

	res = FAULT_SUCCESS;

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
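		 *
		 * In that case the map read lock is retaken and, unless the
		 * map changed underneath us, FAULT_CONTINUE routes the fault
		 * back to the ordinary allocate/page-in path.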
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (FAULT_RESTART);
		return (FAULT_CONTINUE);
	}
	if (rv != VM_PAGER_OK)
		return (FAULT_FAILURE);	/* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(fs->entry);
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (FAULT_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where populate
	 * only busies the first page in the superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if (rv != KERN_SUCCESS) {
			res = FAULT_FAILURE;
			goto out;
		}
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
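	 *
	 * Pages populated outside the clipped range are handed back via
	 * vm_fault_populate_cleanup(), which deactivates and unbusies
	 * them.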
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first; pidx <= pager_last; pidx += npages) {
		m = vm_page_lookup(fs->first_object, pidx);
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
		KASSERT(m != NULL && m->pindex == pidx,
		    ("%s: pindex mismatch", __func__));
		psind = m->psind;
		while (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap)))
			psind--;

		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);

		/*
		 * pmap_enter() may fail for a superpage mapping if additional
		 * protection policies prevent the full mapping.
		 * For example, this will happen on amd64 if the entire
		 * address range does not share the same userspace protection
		 * key.  Revert to single-page mappings if this happens.
		 */
		MPASS(rv == KERN_SUCCESS ||
		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
		if (__predict_false(psind > 0 &&
		    rv == KERN_PROTECTION_FAILURE)) {
			MPASS(!fs->wired);
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type, 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}

		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
			    m[i].pindex == fs->first_pindex)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL &&
			    m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (res);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 * vm_fault_trap:
 *
 *	Helper for the machine-dependent page fault trap handlers, wrapping
 *	vm_fault().  Issues ktrace(2) tracepoints for the faults.
 *
 *	If the fault cannot be handled successfully by updating the
 *	required mapping, and the faulted instruction cannot be restarted,
 *	the signal number and si_code values are returned for trapsignal()
 *	to deliver.
 *
 *	Returns Mach error codes, but callers should only check for
 *	KERN_SUCCESS.
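 *
 *	The translation performed below is: KERN_FAILURE and
 *	KERN_INVALID_ADDRESS become SIGSEGV/SEGV_MAPERR,
 *	KERN_RESOURCE_SHORTAGE becomes SIGBUS/BUS_OOMERR,
 *	KERN_OUT_OF_BOUNDS becomes SIGBUS/BUS_OBJERR, and
 *	KERN_PROTECTION_FAILURE is translated according to the
 *	machdep.prot_fault_translation sysctl.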
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static bool
vm_fault_object_ensure_wlocked(struct faultstate *fs)
{
	if (fs->object == fs->first_object)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);

	if (!fs->can_read_lock) {
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
		return (true);
	}

	if (VM_OBJECT_WOWNED(fs->object))
		return (true);

	if (VM_OBJECT_TRYUPGRADE(fs->object))
		return (true);

	return (false);
}

static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (FAULT_CONTINUE);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (FAULT_CONTINUE);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	vm_fault_unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
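	 *
	 * Therefore a non-blocking vget() is tried first; if it fails,
	 * all fault state is released before sleeping for the vnode
	 * lock, and the fault is restarted.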
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (FAULT_CONTINUE);
	}

	vhold(vp);
	if (objlocked)
		vm_fault_unlock_and_deallocate(fs);
	else
		vm_fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (FAULT_RESTART);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is
		 * "# of sequential faults x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		vm_fault_unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			vm_fault_unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}

static bool
vm_fault_can_cow_rename(struct faultstate *fs)
{
	return (
	    /* Only one shadow object and no other refs. */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /* No other ways to look the object up. */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0);
}

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked, rename_cow;

	KASSERT(vm_fault_might_be_cow(fs),
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	rename_cow = false;

	if (vm_fault_can_cow_rename(fs) && vm_page_xbusied(fs->m)) {
		/*
		 * Check that we don't chase down the shadow chain and
		 * we can acquire locks.  Recheck the conditions for
		 * rename after the shadow chain is stable after the
		 * object locking.
		 */
		is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object);
		if (is_first_object_locked &&
		    fs->object == fs->first_object->backing_object) {
			if (VM_OBJECT_TRYWLOCK(fs->object)) {
				rename_cow = vm_fault_can_cow_rename(fs);
				if (!rename_cow)
					VM_OBJECT_WUNLOCK(fs->object);
			}
		}
	}

	if (rename_cow) {
		vm_page_assert_xbusied(fs->m);

		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the COW page to be released after pmap_enter is
		 * complete.  The new copy will be marked valid when we're
		 * ready to map it.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow
		 * object are read/write shared between this and other address
		 * spaces, we need to ensure that any pmap-level mappings to
		 * the original, copy-on-write page from the backing object
		 * are removed from those other address spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the current
		 * address space.  If OBJ_ONEMAPPING is set after the check,
		 * removing mappings will at worst trigger some unnecessary
		 * page faults.
		 *
		 * In the fs->m shared busy case, the xbusy state of
		 * fs->first_m prevents new mappings of fs->m from
		 * being created because a parallel fault on this
		 * shadow chain should wait for xbusy on fs->first_m.
		 */
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static enum fault_next_status
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	if (fs->object == fs->first_object || !fs->can_read_lock)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
	else
		VM_OBJECT_ASSERT_LOCKED(fs->object);

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else if (fs->m != NULL) {
		if (!vm_fault_object_ensure_wlocked(fs)) {
			fs->can_read_lock = false;
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_NEXT_RESTART);
		}
		vm_fault_page_free(&fs->m);
	}

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (FAULT_NEXT_NOOBJ);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	if (fs->can_read_lock)
		VM_OBJECT_RLOCK(next_object);
	else
		VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_UNLOCK(fs->object);
	fs->object = next_object;

	return (FAULT_NEXT_GOTOBJ);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (vm_fault_might_be_cow(fs)) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
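	 *
	 * Pages handed out pre-zeroed by the allocator (PG_ZERO, i.e.
	 * !m_needs_zeroing) skip pmap_zero_page() and are counted as
	 * optimized zero fills (v_ozfod).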
	 */
	if (fs->m_needs_zeroing) {
		pmap_zero_page(fs->m);
	} else {
#ifdef INVARIANTS
		if (vm_check_pg_zero) {
			struct sf_buf *sf;
			unsigned long *p;
			int i;

			sched_pin();
			sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
			p = (unsigned long *)sf_buf_kva(sf);
			for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++) {
				KASSERT(*p == 0,
				    ("zerocheck failed page %p PG_ZERO %d %jx",
				    fs->m, i, (uintmax_t)*p));
			}
			sf_buf_free(sf);
			sched_unpin();
		}
#endif
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}

/*
 * Initiate page fault after timeout.  Returns true if caller should
 * do vm_waitpfault() after the call.
 */
static bool
vm_fault_allocate_oom(struct faultstate *fs)
{
	struct timeval now;

	vm_fault_unlock_and_deallocate(fs);
	if (vm_pfault_oom_attempts < 0)
		return (true);
	if (!fs->oom_started) {
		fs->oom_started = true;
		getmicrotime(&fs->oom_start_time);
		return (true);
	}

	getmicrotime(&now);
	timevalsub(&now, &fs->oom_start_time);
	if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait)
		return (true);

	if (bootverbose)
		printf(
	    "proc %d (%s) failed to alloc page on fault, starting OOM\n",
		    curproc->p_pid, curproc->p_comm);
	vm_pageout_oom(VM_OOM_MEM_PF);
	fs->oom_started = false;
	return (false);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static enum fault_status
vm_fault_allocate(struct faultstate *fs, struct pctrie_iter *pages)
{
	struct domainset *dset;
	enum fault_status res;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		res = vm_fault_lock_vnode(fs, true);
		MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
		if (res == FAULT_RESTART)
			return (res);
	}

	if (fs->pindex >= fs->object->size) {
		vm_fault_unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		res = vm_fault_populate(fs);
		switch (res) {
		case FAULT_SUCCESS:
		case FAULT_FAILURE:
		case FAULT_RESTART:
			vm_fault_unlock_and_deallocate(fs);
			return (res);
		case FAULT_CONTINUE:
			pctrie_iter_reset(pages);
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * If the process has a fatal signal pending, prioritize the
	 * allocation with the expectation that the process will exit shortly
	 * and free some pages.  In particular, the signal may have been
	 * posted by the page daemon in an attempt to resolve an out-of-memory
	 * condition.
	 *
	 * The unlocked read of the p_flag is harmless.  At worst, the
	 * P_KILLED might not be observed here, and the allocation fails,
	 * causing a restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_FAILURE);
		}
		fs->m = vm_page_alloc_iter(fs->object, fs->pindex,
		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0, pages);
	}
	if (fs->m == NULL) {
		if (vm_fault_allocate_oom(fs))
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		return (FAULT_RESTART);
	}
	fs->m_needs_zeroing = (fs->m->flags & PG_ZERO) == 0;
	fs->oom_started = false;

	return (FAULT_CONTINUE);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static enum fault_status
vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	enum fault_status status;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * If the pager for the current object might have
	 * the page, then determine the number of additional
	 * pages to read and potentially reprioritize
	 * previously read pages for earlier reclamation.
	 * These operations should only be performed once per
	 * page fault.  Even if the current pager doesn't
	 * have the page, the number of additional pages to
	 * read will apply to subsequent objects in the
	 * shadow chain.
	 */
	if (fs->nera == -1 && !P_KILLED(curproc))
		fs->nera = vm_fault_readahead(fs);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	vm_fault_unlock_map(fs);

	status = vm_fault_lock_vnode(fs, false);
	MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
	if (status == FAULT_RESTART)
		return (status);
	KASSERT(fs->vp == NULL || !vm_map_is_system(fs->map),
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring up surrounding pages.
	 */
	if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (fs->nera > 0) {
			behind = 0;
			ahead = fs->nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
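			 *
			 * For example, if the faulting pindex lies three
			 * pages past such a boundary, up to three pages
			 * behind and the remainder of the cluster ahead
			 * are requested, clipped below to the map entry.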
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (FAULT_HARD);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		VM_OBJECT_WLOCK(fs->object);
		vm_fault_page_free(&fs->m);
		vm_fault_unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}
	KASSERT(rv == VM_PAGER_FAIL,
	    ("%s: unexpected pager error %d", __func__, rv));
	return (FAULT_CONTINUE);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We allow the busy case on a read fault if the page is valid.  We
 * cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.  This is controlled by the
 * VM_ALLOC_SBUSY bit in the allocflags argument.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs, int allocflags)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (vm_fault_might_be_cow(fs)) {
		vm_fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	vm_fault_unlock_map(fs);
	if (!vm_page_busy_sleep(fs->m, "vmpfw", allocflags))
		VM_OBJECT_UNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

/*
 * Handle page lookup, populate, allocate, page-in for the current
 * object.
 *
 * The object is locked on entry and will remain locked with a return
 * code of FAULT_CONTINUE so that the fault may follow the shadow chain.
 * Otherwise, the object will be unlocked upon return.
 */
static enum fault_status
vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
{
	struct pctrie_iter pages;
	enum fault_status res;
	bool dead;

	if (fs->object == fs->first_object || !fs->can_read_lock)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
	else
		VM_OBJECT_ASSERT_LOCKED(fs->object);

	/*
	 * If the object is marked for imminent termination, we retry
	 * here, since the collapse pass has raced with us.  Otherwise,
	 * if we see a terminally dead object, return failure.
	 */
	if ((fs->object->flags & OBJ_DEAD) != 0) {
		dead = fs->object->type == OBJT_DEAD;
		vm_fault_unlock_and_deallocate(fs);
		if (dead)
			return (FAULT_PROTECTION_FAILURE);
		pause("vmf_de", 1);
		return (FAULT_RESTART);
	}

	/*
	 * See if the page is resident.
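	 *
	 * A resident, fully valid page lets the fault complete without
	 * pager I/O (a soft fault); it only needs to be busied before
	 * the object lock is dropped.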
	 */
	vm_page_iter_init(&pages, fs->object);
	fs->m = vm_radix_iter_lookup(&pages, fs->pindex);
	if (fs->m != NULL) {
		/*
		 * If the found page is valid, will be either shadowed
		 * or mapped read-only, and will not be renamed for
		 * COW, then busy it in shared mode.  This allows
		 * other faults needing this page to proceed in
		 * parallel.
		 *
		 * Unlocked check for validity, rechecked after busy
		 * is obtained.
		 */
		if (vm_page_all_valid(fs->m) &&
		    /*
		     * No write permissions for the new fs->m mapping,
		     * or the first object has only one mapping, so
		     * other writeable COW mappings of fs->m cannot
		     * appear under us.
		     */
		    (vm_fault_is_read(fs) || vm_fault_might_be_cow(fs)) &&
		    /*
		     * fs->m cannot be renamed from object to
		     * first_object.  These conditions will be
		     * re-checked with proper synchronization in
		     * vm_fault_cow().
		     */
		    (!vm_fault_can_cow_rename(fs) ||
		    fs->object != fs->first_object->backing_object)) {
			if (!vm_page_trysbusy(fs->m)) {
				vm_fault_busy_sleep(fs, VM_ALLOC_SBUSY);
				return (FAULT_RESTART);
			}

			/*
			 * Now make sure that racily checked
			 * conditions are still valid.
			 */
			if (__predict_true(vm_page_all_valid(fs->m) &&
			    (vm_fault_is_read(fs) ||
			    vm_fault_might_be_cow(fs)))) {
				VM_OBJECT_UNLOCK(fs->object);
				return (FAULT_SOFT);
			}

			vm_page_sunbusy(fs->m);
		}

		if (!vm_page_tryxbusy(fs->m)) {
			vm_fault_busy_sleep(fs, 0);
			return (FAULT_RESTART);
		}

		/*
		 * The page is marked busy for other processes and the
		 * pagedaemon.  If it is still completely valid we are
		 * done.
		 */
		if (vm_page_all_valid(fs->m)) {
			VM_OBJECT_UNLOCK(fs->object);
			return (FAULT_SOFT);
		}
	}

	/*
	 * Page is not resident.  If the pager might contain the page
	 * or this is the beginning of the search, allocate a new
	 * page.
	 */
	if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
	    fs->object == fs->first_object)) {
		if (!vm_fault_object_ensure_wlocked(fs)) {
			fs->can_read_lock = false;
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_RESTART);
		}
		res = vm_fault_allocate(fs, &pages);
		if (res != FAULT_CONTINUE)
			return (res);
	}

	/*
	 * Check to see if the pager can possibly satisfy this fault.
	 * If not, skip to the next object without dropping the lock to
	 * preserve atomicity of shadow faults.
	 */
	if (vm_fault_object_needs_getpages(fs->object)) {
		/*
		 * At this point, we have either allocated a new page
		 * or found an existing page that is only partially
		 * valid.
		 *
		 * We hold a reference on the current object and the
		 * page is exclusive busied.  The exclusive busy
		 * prevents simultaneous faults and collapses while
		 * the object lock is dropped.
		 */
		VM_OBJECT_UNLOCK(fs->object);
		res = vm_fault_getpages(fs, behindp, aheadp);
		if (res == FAULT_CONTINUE)
			VM_OBJECT_WLOCK(fs->object);
	} else {
		res = FAULT_CONTINUE;
	}
	return (res);
}

/*
 * vm_fault:
 *
 *	Handle a page fault occurring at the given address, requiring the
 *	given permissions, in the map specified.  If successful, the page
 *	is inserted into the associated physical map, and optionally
 *	referenced and returned in *m_hold.
 *
 *	The given address should be truncated to the proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise, a
 *	Mach error code explaining why the fault is fatal is returned.
 *
 *	The map in question must be alive: it is either the map of the
 *	current process, or the owning process's hold count has been
 *	incremented to prevent exit().
 *
 *	If the thread-private TDP_NOFAULTING flag is set, any fault results
 *	in an immediate protection failure.  Otherwise the fault is
 *	processed; the caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct pctrie_iter pages;
	struct faultstate fs;
	int ahead, behind, faultcount, rv;
	enum fault_status res;
	enum fault_next_status res_next;
	bool hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.m_needs_zeroing = true;
	fs.oom_started = false;
	fs.nera = -1;
	fs.can_read_lock = true;
	faultcount = 0;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	rv = vm_fault_lookup(&fs);
	if (rv != KERN_SUCCESS) {
		if (rv == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (rv);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		res = vm_fault_soft_fast(&fs);
		if (res == FAULT_SUCCESS) {
			VM_OBJECT_ASSERT_UNLOCKED(fs.first_object);
			return (KERN_SUCCESS);
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.first_object);
	} else {
		vm_page_iter_init(&pages, fs.first_object);
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		res = vm_fault_allocate(&fs, &pages);
		switch (res) {
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));

		res = vm_fault_object(&fs, &behind, &ahead);
		switch (res) {
		case FAULT_SOFT:
			goto found;
		case FAULT_HARD:
			faultcount = behind + 1 + ahead;
			hardfault = true;
			goto found;
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_PROTECTION_FAILURE:
			return (KERN_PROTECTION_FAILURE);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		res_next = vm_fault_next(&fs);
		if (res_next == FAULT_NEXT_RESTART)
			goto RetryFault;
		else if (res_next == FAULT_NEXT_GOTOBJ)
			continue;
		MPASS(res_next == FAULT_NEXT_NOOBJ);
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				vm_fault_page_free(&fs.first_m);
			vm_fault_unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_UNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break;
	}

found:
	/*
	 * A valid page has been found and busied.  The object lock
	 * must no longer be held if the page was busied.
	 *
	 * Regardless of the busy state of fs.m, fs.first_m is always
	 * exclusively busied after the first iteration of the loop
	 * calling vm_fault_object().  This is an ordering point for
	 * the parallel faults occurring on the same page.
	 */
	vm_page_assert_busied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (vm_fault_might_be_cow(&fs)) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
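	 *
	 * vm_fault_relookup() retakes the map read lock and requests a
	 * restart of the whole fault if the entry, object, or protection
	 * changed in the meantime.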
	 */
	if (!fs.lookup_still_valid) {
		rv = vm_fault_relookup(&fs);
		if (rv != KERN_SUCCESS) {
			vm_fault_deallocate(&fs);
			if (rv == KERN_RESTART)
				goto RetryFault;
			return (rv);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * If the page to be mapped was copied from a backing object, we defer
	 * marking it valid until here, where the fault handler is guaranteed
	 * to succeed.  Otherwise we can end up with a shadowed, mapped page
	 * in the backing object, which violates an invariant of
	 * vm_object_collapse() that shadowed pages are not mapped.
	 */
	if (fs.m_cow != NULL) {
		KASSERT(vm_page_none_valid(fs.m),
		    ("vm_fault: page %p is already valid", fs.m_cow));
		vm_page_valid(fs.m);
	}

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_busied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}

	KASSERT(fs.first_object == fs.object || vm_page_xbusied(fs.first_m),
	    ("first_m must be xbusy"));
	if (vm_page_xbusied(fs.m))
		vm_page_xunbusy(fs.m);
	else
		vm_page_sunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return
	 */
	vm_fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}
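
/*
 * Illustrative sketch (an assumption, not taken from this file): a caller
 * such as machine-dependent trap handling code, possibly through a wrapper,
 * is expected to truncate the faulting address as required by the header
 * comment above and to act on the returned KERN_* code, e.g.:
 *
 *	rv = vm_fault(map, trunc_page(va), VM_PROT_READ, VM_FAULT_NORMAL,
 *	    NULL);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV or SIGBUS, or return an error, based on rv)
 */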

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * of madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	struct pctrie_iter pages;
	vm_map_entry_t entry;
	vm_object_t first_object;
	vm_offset_t end, start;
	vm_page_t m;
	vm_size_t size;

	VM_OBJECT_ASSERT_UNLOCKED(fs->object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			vm_page_iter_limit_init(&pages, first_object,
			    OFF_TO_IDX(entry->offset) +
			    atop(end - entry->start));
			VM_RADIX_FOREACH_FROM(m, &pages,
			    OFF_TO_IDX(entry->offset) +
			    atop(start - entry->start)) {
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_prot_t prot;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}
	prot = entry->protection;

	/*
	 * If pmap_enter() has enabled write access on a nearby mapping, then
	 * don't attempt promotion, because it will fail.
	 */
	if ((fs->prot & VM_PROT_WRITE) != 0)
		prot |= VM_PROT_NO_PROMOTE;

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    !vm_fault_object_needs_getpages(lobject) &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, prot);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified
 * range of virtual addresses, ["addr", "addr" + "len"), if those
 * mappings are valid and allow the specified types of access, "prot".
 * If all of the implied pages are successfully held, then the number
 * of held pages is assigned to *ppages_count, together with pointers
 * to those pages in the array "ma".  The returned value is zero.
 *
 * However, if any of the pages cannot be held, an error is returned,
 * and no pages are held.
 * Error values:
 *	ENOMEM - the range is not valid
 *	EINVAL - the provided vm_page array is too small to hold all pages
 *	EAGAIN - a page was not mapped, and the thread is in nofaulting mode
 *	EFAULT - a page with requested permissions cannot be mapped
 *		 (more detailed result from vm_fault() is lost)
 */
int
vm_fault_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count, int *ppages_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count, error;
	boolean_t pmap_failed;

	if (len == 0) {
		*ppages_count = 0;
		return (0);
	}
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (ENOMEM);

	if (atop(end - addr) > max_count)
		return (EINVAL);
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers mark
		 * too wide a code area with vm_fault_disable_pagefaults()
		 * already; use the VM_PROT_QUICK_NOFAULT flag to request
		 * the proper behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0) {
			error = EAGAIN;
			goto fail;
		}
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS) {
				error = EFAULT;
				goto fail;
			}
		}
	}
	*ppages_count = count;
	return (0);
fail:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (error);
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	int error, pages_count;

	error = vm_fault_hold_pages(map, addr, len, prot, ma,
	    max_count, &pages_count);
	if (error != 0) {
		if (error == EINVAL)
			panic("vm_fault_quick_hold_pages: count > max_count");
		return (-1);
	}
	return (pages_count);
}
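
/*
 * Illustrative sketch: a caller that needs a user buffer held for the
 * duration of an I/O operation can pair vm_fault_quick_hold_pages() with
 * vm_page_unhold_pages().  The array size and the protection bits below are
 * only an example chosen for illustration:
 *
 *	vm_page_t ma[16];
 *	int count;
 *
 *	count = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    uaddr, len, VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
 *	if (count == -1)
 *		return (EFAULT);
 *	(perform the I/O on ma[0 .. count - 1])
 *	vm_page_unhold_pages(ma, count);
 */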

/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new object backing dst_entry with private copy of all
 *	underlying pages.  When src_entry is equal to dst_entry, function
 *	implements COW for wired-down map entry.  Otherwise, it forks
 *	wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	struct pctrie_iter pages;
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	bool upgrade;

	upgrade = src_entry == dst_entry;
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	access = prot = dst_entry->protection;
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;

		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}

	VM_OBJECT_WLOCK(dst_object);
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += ptoa(dst_object->size);
	} else if ((dst_object->flags & OBJ_SWAP) != 0 &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	vm_page_iter_init(&pages, dst_object);
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			pindex = (src_object == dst_object ? src_pindex : 0) +
			    dst_pindex;
			dst_m = vm_page_alloc_iter(dst_object, pindex,
			    VM_ALLOC_NORMAL, &pages);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				pctrie_iter_reset(&pages);
				goto again;
			}

			/*
			 * See the comment in vm_fault_cow().
			 */
			if (src_object == dst_object &&
			    (object->flags & OBJ_ONEMAPPING) == 0)
				pmap_remove_all(src_m);
			pmap_copy_page(src_m, dst_m);

			/*
			 * The object lock does not guarantee that "src_m" will
			 * transition from invalid to valid, but it does ensure
			 * that "src_m" will not transition from valid to
			 * invalid.
			 */
			dst_m->dirty = dst_m->valid = src_m->valid;
			VM_OBJECT_RUNLOCK(object);
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(
			    dst_m, VM_ALLOC_WAITFAIL) == 0) {
				pctrie_iter_reset(&pages);
				goto again;
			}
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can be
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			VM_OBJECT_WUNLOCK(dst_object);
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
			VM_OBJECT_WLOCK(dst_object);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
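
/*
 * Illustrative sketch: the two functions above are meant to bracket a code
 * region that must not enter the page fault handler, with the caller saving
 * and restoring the previous state.  For example, while nofaulting is in
 * effect, a copyin() that would otherwise fault a page in fails instead:
 *
 *	int save, error;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(uaddr, kbuf, len);	(EFAULT rather than a fault)
 *	vm_fault_enable_pagefaults(save);
 */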