/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
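 *
 * This module implements vm_fault() and its helpers: it maps a faulting
 * virtual address to a backing object, walks the shadow chain, pages
 * data in from the pager when needed, performs copy-on-write, and
 * enters the resulting page into the physical map.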
 */

#include <sys/cdefs.h>
#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;
	bool		can_read_lock;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 10000,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

enum fault_next_status {
	FAULT_NEXT_GOTOBJ = 1,
	FAULT_NEXT_NOOBJ,
	FAULT_NEXT_RESTART,
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
vm_fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
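		 * The page remains resident in its object, so a retried
		 * fault can still find it without new pager I/O.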
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
vm_fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

/*
 * Return true if a vm_pager_get_pages() call is needed in order to check
 * whether the pager might have a particular page, false if it can be
 * determined immediately that the pager cannot have a copy.  For swap
 * objects, this can be checked quickly.
 */
static inline bool
vm_fault_object_needs_getpages(vm_object_t object)
{
	VM_OBJECT_ASSERT_LOCKED(object);

	return ((object->flags & OBJ_SWAP) == 0 ||
	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
}

static inline void
vm_fault_unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
vm_fault_unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
vm_fault_deallocate(struct faultstate *fs)
{

	vm_fault_page_release(&fs->m_cow);
	vm_fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		vm_fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	vm_fault_unlock_map(fs);
	vm_fault_unlock_vp(fs);
}

static void
vm_fault_unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_UNLOCK(fs->object);
	vm_fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
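 * On failure, returns with fs.first_object write-locked so that the
 * caller can continue with the regular fault path.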
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);

	/*
	 * If we fail, the vast majority of the time it is because the page
	 * is not there to begin with.  Opportunistically perform the lookup
	 * and subsequent checks without the object lock, and revalidate
	 * later.
	 *
	 * Note: a busy page can be mapped for read|execute access.
	 */
	m = vm_page_lookup_unlocked(fs->first_object, fs->first_pindex);
	if (m == NULL || !vm_page_all_valid(m) ||
	    ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m))) {
		VM_OBJECT_WLOCK(fs->first_object);
		return (FAULT_FAILURE);
	}

	vaddr = fs->vaddr;

	VM_OBJECT_RLOCK(fs->first_object);

	/*
	 * Now that we have stabilized the state, revalidate that the page
	 * is in the state we encountered above.
	 */

	if (m->object != fs->first_object || m->pindex != fs->first_pindex)
		goto fail;

	vm_object_busy(fs->first_object);

	if (!vm_page_all_valid(m) ||
	    ((fs->prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)))
		goto fail_busy;

	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL) {
		psind = m_super->psind;
		KASSERT(psind > 0,
		    ("psind %d of m_super %p < 1", psind, m_super));
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		while (rounddown2(vaddr, pagesizes[psind]) < fs->entry->start ||
		    roundup2(vaddr + 1, pagesizes[psind]) > fs->entry->end ||
		    (vaddr & (pagesizes[psind] - 1)) !=
		    (VM_PAGE_TO_PHYS(m) & (pagesizes[psind] - 1)) ||
		    !vm_page_ps_test(m_super, psind, flags, m) ||
		    !pmap_ps_enabled(fs->map->pmap)) {
			psind--;
			if (psind == 0)
				break;
			m_super += rounddown2(m - m_super,
			    atop(pagesizes[psind]));
			KASSERT(m_super->psind >= psind,
			    ("psind %d of m_super %p < %d", m_super->psind,
			    m_super, psind));
		}
		if (psind > 0) {
			m_map = m_super;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ?
	    PMAP_ENTER_WIRED : 0), psind) != KERN_SUCCESS)
		goto fail_busy;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_object_unbusy(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;
	return (FAULT_SUCCESS);
fail_busy:
	vm_object_unbusy(fs->first_object);
fail:
	if (!VM_OBJECT_TRYUPGRADE(fs->first_object)) {
		VM_OBJECT_RUNLOCK(fs->first_object);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	return (FAULT_FAILURE);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	struct pctrie_iter pages;
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	vm_page_iter_limit_init(&pages, object, last + 1);
	VM_RADIX_FORALL_FROM(m, &pages, first) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
	KASSERT(pages.index == last, ("%s: pindex mismatch", __func__));
}

static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;
	enum fault_status res;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	vm_fault_unlock_map(fs);
	vm_fault_unlock_vp(fs);

	res = FAULT_SUCCESS;

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
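		 * Re-take the map lock and let the caller fall back to
		 * ordinary page allocation, restarting the fault if the
		 * map has changed in the meantime.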
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (FAULT_RESTART);
		return (FAULT_CONTINUE);
	}
	if (rv != VM_PAGER_OK)
		return (FAULT_FAILURE);	/* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(fs->entry);
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (FAULT_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in the superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if (rv != KERN_SUCCESS) {
			res = FAULT_FAILURE;
			goto out;
		}
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
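	 * Pages outside the clipped range were busied by the pager and
	 * are released by vm_fault_populate_cleanup().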
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first; pidx <= pager_last; pidx += npages) {
		m = vm_page_lookup(fs->first_object, pidx);
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
		KASSERT(m != NULL && m->pindex == pidx,
		    ("%s: pindex mismatch", __func__));
		psind = m->psind;
		while (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap)))
			psind--;

		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0),
		    psind);

		/*
		 * pmap_enter() may fail for a superpage mapping if additional
		 * protection policies prevent the full mapping.
		 * For example, this will happen on amd64 if the entire
		 * address range does not share the same userspace protection
		 * key.  Revert to single-page mappings if this happens.
		 */
		MPASS(rv == KERN_SUCCESS ||
		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
		if (__predict_false(psind > 0 &&
		    rv == KERN_PROTECTION_FAILURE)) {
			MPASS(!fs->wired);
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type, 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}

		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
			    m[i].pindex == fs->first_pindex)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL &&
			    m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (res);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 * vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
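 *
 *	On failure, the Mach error code is translated into a
 *	(signo, ucode) pair for the machine-dependent trap handler
 *	to deliver.  A sketch of the typical call from MD trap code,
 *	where "va" and "ftype" are illustrative names rather than
 *	identifiers from this file:
 *
 *		if (vm_fault_trap(map, va, ftype, 0, &signo, &ucode) !=
 *		    KERN_SUCCESS)
 *			deliver signo/ucode to the current thread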
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static bool
vm_fault_object_ensure_wlocked(struct faultstate *fs)
{
	if (fs->object == fs->first_object)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);

	if (!fs->can_read_lock) {
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
		return (true);
	}

	if (VM_OBJECT_WOWNED(fs->object))
		return (true);

	if (VM_OBJECT_TRYUPGRADE(fs->object))
		return (true);

	return (false);
}

static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (FAULT_CONTINUE);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (FAULT_CONTINUE);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	vm_fault_unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
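	 * Try a non-blocking acquisition first; if that fails, back
	 * out all fault state before sleeping for the lock and have
	 * the caller restart the fault.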
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (FAULT_CONTINUE);
	}

	vhold(vp);
	if (objlocked)
		vm_fault_unlock_and_deallocate(fs);
	else
		vm_fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (FAULT_RESTART);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
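		 * A racing update by another fault at worst loses a
		 * single adjustment of this heuristic.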
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		vm_fault_unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			vm_fault_unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	KASSERT(fs->object != fs->first_object,
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.
	 * Note that we must mark the page dirty in the first object so
	 * that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the COW page to be released after pmap_enter is
		 * complete.  The new copy will be marked valid when we're
		 * ready to map it.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow
		 * object are read/write shared between this and other
		 * address spaces, we need to ensure that any pmap-level
		 * mappings to the original, copy-on-write page from the
		 * backing object are removed from those other address
		 * spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the
		 * current address space.  If OBJ_ONEMAPPING is set after
		 * the check, removing mappings will at worst trigger some
		 * unnecessary page faults.
		 */
		vm_page_assert_xbusied(fs->m_cow);
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

static enum fault_next_status
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	if (fs->object == fs->first_object || !fs->can_read_lock)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
	else
		VM_OBJECT_ASSERT_LOCKED(fs->object);

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else if (fs->m != NULL) {
		if (!vm_fault_object_ensure_wlocked(fs)) {
			fs->can_read_lock = false;
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_NEXT_RESTART);
		}
		vm_fault_page_free(&fs->m);
	}

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (FAULT_NEXT_NOOBJ);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	if (fs->can_read_lock)
		VM_OBJECT_RLOCK(next_object);
	else
		VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_UNLOCK(fs->object);
	fs->object = next_object;

	return (FAULT_NEXT_GOTOBJ);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}

/*
 * Initiate OOM handling when page allocations in the fault handler
 * have been failing for longer than the configured grace period.
 * Returns true if the caller should do vm_waitpfault() after the call.
 */
static bool
vm_fault_allocate_oom(struct faultstate *fs)
{
	struct timeval now;

	vm_fault_unlock_and_deallocate(fs);
	if (vm_pfault_oom_attempts < 0)
		return (true);
	if (!fs->oom_started) {
		fs->oom_started = true;
		getmicrotime(&fs->oom_start_time);
		return (true);
	}

	getmicrotime(&now);
	timevalsub(&now, &fs->oom_start_time);
	if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait)
		return (true);

	if (bootverbose)
		printf(
	    "proc %d (%s) failed to alloc page on fault, starting OOM\n",
		    curproc->p_pid, curproc->p_comm);
	vm_pageout_oom(VM_OOM_MEM_PF);
	fs->oom_started = false;
	return (false);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static enum fault_status
vm_fault_allocate(struct faultstate *fs, struct pctrie_iter *pages)
{
	struct domainset *dset;
	enum fault_status res;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		res = vm_fault_lock_vnode(fs, true);
		MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
		if (res == FAULT_RESTART)
			return (res);
	}

	if (fs->pindex >= fs->object->size) {
		vm_fault_unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		res = vm_fault_populate(fs);
		switch (res) {
		case FAULT_SUCCESS:
		case FAULT_FAILURE:
		case FAULT_RESTART:
			vm_fault_unlock_and_deallocate(fs);
			return (res);
		case FAULT_CONTINUE:
			pctrie_iter_reset(pages);
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * If the process has a fatal signal pending, prioritize the
	 * allocation with the expectation that the process will exit
	 * shortly and free some pages.  In particular, the signal may
	 * have been posted by the page daemon in an attempt to resolve
	 * an out-of-memory condition.
	 *
	 * The unlocked read of the p_flag is harmless.  At worst, the
	 * P_KILLED might not be observed here, and the allocation fails,
	 * causing a restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_FAILURE);
		}
		fs->m = vm_page_alloc_after(fs->object, pages, fs->pindex,
		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0,
		    vm_radix_iter_lookup_lt(pages, fs->pindex));
	}
	if (fs->m == NULL) {
		if (vm_fault_allocate_oom(fs))
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		return (FAULT_RESTART);
	}
	fs->oom_started = false;

	return (FAULT_CONTINUE);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static enum fault_status
vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	enum fault_status status;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * If the pager for the current object might have
	 * the page, then determine the number of additional
	 * pages to read and potentially reprioritize
	 * previously read pages for earlier reclamation.
	 * These operations should only be performed once per
	 * page fault.  Even if the current pager doesn't
	 * have the page, the number of additional pages to
	 * read will apply to subsequent objects in the
	 * shadow chain.
	 */
	if (fs->nera == -1 && !P_KILLED(curproc))
		fs->nera = vm_fault_readahead(fs);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	vm_fault_unlock_map(fs);

	status = vm_fault_lock_vnode(fs, false);
	MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
	if (status == FAULT_RESTART)
		return (status);
	KASSERT(fs->vp == NULL || !vm_map_is_system(fs->map),
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring up surrounding pages.
	 */
	if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (fs->nera > 0) {
			behind = 0;
			ahead = fs->nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (FAULT_HARD);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		VM_OBJECT_WLOCK(fs->object);
		vm_fault_page_free(&fs->m);
		vm_fault_unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}
	KASSERT(rv == VM_PAGER_FAIL,
	    ("%s: unexpected pager error %d", __func__, rv));
	return (FAULT_CONTINUE);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		vm_fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	vm_fault_unlock_map(fs);
	if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
	    !vm_page_busy_sleep(fs->m, "vmpfw", 0))
		VM_OBJECT_UNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

/*
 * Handle page lookup, populate, allocate, page-in for the current
 * object.
 *
 * The object is locked on entry and will remain locked with a return
 * code of FAULT_CONTINUE so that the fault may follow the shadow
 * chain.  Otherwise, the object will be unlocked upon return.
 */
static enum fault_status
vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
{
	struct pctrie_iter pages;
	enum fault_status res;
	bool dead;

	if (fs->object == fs->first_object || !fs->can_read_lock)
		VM_OBJECT_ASSERT_WLOCKED(fs->object);
	else
		VM_OBJECT_ASSERT_LOCKED(fs->object);

	/*
	 * If the object is marked for imminent termination, we retry
	 * here, since the collapse pass has raced with us.  Otherwise,
	 * if we see a terminally dead object, return failure.
	 */
	if ((fs->object->flags & OBJ_DEAD) != 0) {
		dead = fs->object->type == OBJT_DEAD;
		vm_fault_unlock_and_deallocate(fs);
		if (dead)
			return (FAULT_PROTECTION_FAILURE);
		pause("vmf_de", 1);
		return (FAULT_RESTART);
	}

	/*
	 * See if the page is resident.
	 */
	vm_page_iter_init(&pages, fs->object);
	fs->m = vm_radix_iter_lookup(&pages, fs->pindex);
	if (fs->m != NULL) {
		if (!vm_page_tryxbusy(fs->m)) {
			vm_fault_busy_sleep(fs);
			return (FAULT_RESTART);
		}

		/*
		 * The page is marked busy for other processes and the
		 * pagedaemon.  If it is still completely valid we are
		 * done.
		 */
		if (vm_page_all_valid(fs->m)) {
			VM_OBJECT_UNLOCK(fs->object);
			return (FAULT_SOFT);
		}
	}

	/*
	 * Page is not resident.  If the pager might contain the page
	 * or this is the beginning of the search, allocate a new
	 * page.
	 */
	if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
	    fs->object == fs->first_object)) {
		if (!vm_fault_object_ensure_wlocked(fs)) {
			fs->can_read_lock = false;
			vm_fault_unlock_and_deallocate(fs);
			return (FAULT_RESTART);
		}
		res = vm_fault_allocate(fs, &pages);
		if (res != FAULT_CONTINUE)
			return (res);
	}

	/*
	 * Check to see if the pager can possibly satisfy this fault.
	 * If not, skip to the next object without dropping the lock to
	 * preserve atomicity of shadow faults.
	 */
	if (vm_fault_object_needs_getpages(fs->object)) {
		/*
		 * At this point, we have either allocated a new page
		 * or found an existing page that is only partially
		 * valid.
		 *
		 * We hold a reference on the current object and the
		 * page is exclusive busied.  The exclusive busy
		 * prevents simultaneous faults and collapses while
		 * the object lock is dropped.
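		 * The object lock must be dropped here because the
		 * pager may sleep during the I/O.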
		 */
		VM_OBJECT_UNLOCK(fs->object);
		res = vm_fault_getpages(fs, behindp, aheadp);
		if (res == FAULT_CONTINUE)
			VM_OBJECT_WLOCK(fs->object);
	} else {
		res = FAULT_CONTINUE;
	}
	return (res);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct pctrie_iter pages;
	struct faultstate fs;
	int ahead, behind, faultcount, rv;
	enum fault_status res;
	enum fault_next_status res_next;
	bool hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom_started = false;
	fs.nera = -1;
	fs.can_read_lock = true;
	faultcount = 0;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	rv = vm_fault_lookup(&fs);
	if (rv != KERN_SUCCESS) {
		if (rv == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (rv);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		res = vm_fault_soft_fast(&fs);
		if (res == FAULT_SUCCESS) {
			VM_OBJECT_ASSERT_UNLOCKED(fs.first_object);
			return (KERN_SUCCESS);
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.first_object);
	} else {
		vm_page_iter_init(&pages, fs.first_object);
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
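	 * The loop below follows the shadow chain until the page is
	 * found, paged in, or zero-filled.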
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		res = vm_fault_allocate(&fs, &pages);
		switch (res) {
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));

		res = vm_fault_object(&fs, &behind, &ahead);
		switch (res) {
		case FAULT_SOFT:
			goto found;
		case FAULT_HARD:
			faultcount = behind + 1 + ahead;
			hardfault = true;
			goto found;
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_PROTECTION_FAILURE:
			return (KERN_PROTECTION_FAILURE);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		res_next = vm_fault_next(&fs);
		if (res_next == FAULT_NEXT_RESTART)
			goto RetryFault;
		else if (res_next == FAULT_NEXT_GOTOBJ)
			continue;
		MPASS(res_next == FAULT_NEXT_NOOBJ);
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				vm_fault_page_free(&fs.first_m);
			vm_fault_unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_UNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break;
	}

found:
	/*
	 * A valid page has been found and exclusively busied.  The
	 * object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		rv = vm_fault_relookup(&fs);
		if (rv != KERN_SUCCESS) {
			vm_fault_deallocate(&fs);
			if (rv == KERN_RESTART)
				goto RetryFault;
			return (rv);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
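	 * vm_fault_readahead() compares the faulting address against
	 * next_read to recognize sequential access.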
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * If the page to be mapped was copied from a backing object, we
	 * defer marking it valid until here, where the fault handler is
	 * guaranteed to succeed.  Otherwise we can end up with a shadowed,
	 * mapped page in the backing object, which violates an invariant
	 * of vm_object_collapse() that shadowed pages are not mapped.
	 */
	if (fs.m_cow != NULL) {
		KASSERT(vm_page_none_valid(fs.m),
		    ("vm_fault: page %p is already valid", fs.m));
		vm_page_valid(fs.m);
	}

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return
	 */
	vm_fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * of madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
1852 */
1853 static void
1854 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
1855 {
1856 struct pctrie_iter pages;
1857 vm_map_entry_t entry;
1858 vm_object_t first_object;
1859 vm_offset_t end, start;
1860 vm_page_t m;
1861 vm_size_t size;
1862
1863 VM_OBJECT_ASSERT_UNLOCKED(fs->object);
1864 first_object = fs->first_object;
1865 /* Neither fictitious nor unmanaged pages can be reclaimed. */
1866 if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
1867 VM_OBJECT_RLOCK(first_object);
1868 size = VM_FAULT_DONTNEED_MIN;
1869 if (MAXPAGESIZES > 1 && size < pagesizes[1])
1870 size = pagesizes[1];
1871 end = rounddown2(vaddr, size);
1872 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
1873 (entry = fs->entry)->start < end) {
1874 if (end - entry->start < size)
1875 start = entry->start;
1876 else
1877 start = end - size;
1878 pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
1879 vm_page_iter_limit_init(&pages, first_object,
1880 OFF_TO_IDX(entry->offset) +
1881 atop(end - entry->start));
1882 VM_RADIX_FOREACH_FROM(m, &pages,
1883 OFF_TO_IDX(entry->offset) +
1884 atop(start - entry->start)) {
1885 if (!vm_page_all_valid(m) ||
1886 vm_page_busied(m))
1887 continue;
1888
1889 /*
1890 * Don't clear PGA_REFERENCED, since it would
1891 * likely represent a reference by a different
1892 * process.
1893 *
1894 * Typically, at this point, prefetched pages
1895 * are still in the inactive queue. Only
1896 * pages that triggered page faults are in the
1897 * active queue. The test for whether the page
1898 * is in the inactive queue is racy; in the
1899 * worst case we will requeue the page
1900 * unnecessarily.
1901 */
1902 if (!vm_page_inactive(m))
1903 vm_page_deactivate(m);
1904 }
1905 }
1906 VM_OBJECT_RUNLOCK(first_object);
1907 }
1908 }
1909
1910 /*
1911 * vm_fault_prefault provides a quick way of clustering
1912 * page faults into a process's address space. It is a "cousin"
1913 * of vm_map_pmap_enter, except it runs at page fault time instead
1914 * of mmap time.
1915 */
1916 static void
1917 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
1918 int backward, int forward, bool obj_locked)
1919 {
1920 pmap_t pmap;
1921 vm_map_entry_t entry;
1922 vm_object_t backing_object, lobject;
1923 vm_offset_t addr, starta;
1924 vm_pindex_t pindex;
1925 vm_page_t m;
1926 vm_prot_t prot;
1927 int i;
1928
1929 pmap = fs->map->pmap;
1930 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
1931 return;
1932
1933 entry = fs->entry;
1934
1935 if (addra < backward * PAGE_SIZE) {
1936 starta = entry->start;
1937 } else {
1938 starta = addra - backward * PAGE_SIZE;
1939 if (starta < entry->start)
1940 starta = entry->start;
1941 }
1942 prot = entry->protection;
1943
1944 /*
1945 * If pmap_enter() has enabled write access on a nearby mapping, then
1946 * don't attempt promotion, because it will fail.
1947 */
1948 if ((fs->prot & VM_PROT_WRITE) != 0)
1949 prot |= VM_PROT_NO_PROMOTE;
1950
1951 /*
1952 * Generate the sequence of virtual addresses that are candidates for
1953 * prefaulting in an outward spiral from the faulting virtual address,
1954 * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra
1955 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
1956 * If the candidate address doesn't have a backing physical page, then
1957 * the loop immediately terminates.
1958 */
1959 for (i = 0; i < 2 * imax(backward, forward); i++) {
1960 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ?
-PAGE_SIZE : 1961 PAGE_SIZE); 1962 if (addr > addra + forward * PAGE_SIZE) 1963 addr = 0; 1964 1965 if (addr < starta || addr >= entry->end) 1966 continue; 1967 1968 if (!pmap_is_prefaultable(pmap, addr)) 1969 continue; 1970 1971 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 1972 lobject = entry->object.vm_object; 1973 if (!obj_locked) 1974 VM_OBJECT_RLOCK(lobject); 1975 while ((m = vm_page_lookup(lobject, pindex)) == NULL && 1976 !vm_fault_object_needs_getpages(lobject) && 1977 (backing_object = lobject->backing_object) != NULL) { 1978 KASSERT((lobject->backing_object_offset & PAGE_MASK) == 1979 0, ("vm_fault_prefault: unaligned object offset")); 1980 pindex += lobject->backing_object_offset >> PAGE_SHIFT; 1981 VM_OBJECT_RLOCK(backing_object); 1982 if (!obj_locked || lobject != entry->object.vm_object) 1983 VM_OBJECT_RUNLOCK(lobject); 1984 lobject = backing_object; 1985 } 1986 if (m == NULL) { 1987 if (!obj_locked || lobject != entry->object.vm_object) 1988 VM_OBJECT_RUNLOCK(lobject); 1989 break; 1990 } 1991 if (vm_page_all_valid(m) && 1992 (m->flags & PG_FICTITIOUS) == 0) 1993 pmap_enter_quick(pmap, addr, m, prot); 1994 if (!obj_locked || lobject != entry->object.vm_object) 1995 VM_OBJECT_RUNLOCK(lobject); 1996 } 1997 } 1998 1999 /* 2000 * Hold each of the physical pages that are mapped by the specified range of 2001 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid 2002 * and allow the specified types of access, "prot". If all of the implied 2003 * pages are successfully held, then the number of held pages is returned 2004 * together with pointers to those pages in the array "ma". However, if any 2005 * of the pages cannot be held, -1 is returned. 2006 */ 2007 int 2008 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, 2009 vm_prot_t prot, vm_page_t *ma, int max_count) 2010 { 2011 vm_offset_t end, va; 2012 vm_page_t *mp; 2013 int count; 2014 boolean_t pmap_failed; 2015 2016 if (len == 0) 2017 return (0); 2018 end = round_page(addr + len); 2019 addr = trunc_page(addr); 2020 2021 if (!vm_map_range_valid(map, addr, end)) 2022 return (-1); 2023 2024 if (atop(end - addr) > max_count) 2025 panic("vm_fault_quick_hold_pages: count > max_count"); 2026 count = atop(end - addr); 2027 2028 /* 2029 * Most likely, the physical pages are resident in the pmap, so it is 2030 * faster to try pmap_extract_and_hold() first. 2031 */ 2032 pmap_failed = FALSE; 2033 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { 2034 *mp = pmap_extract_and_hold(map->pmap, va, prot); 2035 if (*mp == NULL) 2036 pmap_failed = TRUE; 2037 else if ((prot & VM_PROT_WRITE) != 0 && 2038 (*mp)->dirty != VM_PAGE_BITS_ALL) { 2039 /* 2040 * Explicitly dirty the physical page. Otherwise, the 2041 * caller's changes may go unnoticed because they are 2042 * performed through an unmanaged mapping or by a DMA 2043 * operation. 2044 * 2045 * The object lock is not held here. 2046 * See vm_page_clear_dirty_mask(). 2047 */ 2048 vm_page_dirty(*mp); 2049 } 2050 } 2051 if (pmap_failed) { 2052 /* 2053 * One or more pages could not be held by the pmap. Either no 2054 * page was mapped at the specified virtual address or that 2055 * mapping had insufficient permissions. Attempt to fault in 2056 * and hold these pages. 2057 * 2058 * If vm_fault_disable_pagefaults() was called, 2059 * i.e., TDP_NOFAULTING is set, we must not sleep nor 2060 * acquire MD VM locks, which means we must not call 2061 * vm_fault(). 
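 * In that case, pages that pmap_extract_and_hold() failed to
 * find cannot be faulted in at all, and the only safe behaviour
 * is to fail the whole request.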
Some (out of tree) callers mark
2062 * too wide a code area with vm_fault_disable_pagefaults()
2063 * already; the VM_PROT_QUICK_NOFAULT flag lets a caller request
2064 * the proper behaviour explicitly.
2065 */
2066 if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
2067 (curthread->td_pflags & TDP_NOFAULTING) != 0)
2068 goto error;
2069 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
2070 if (*mp == NULL && vm_fault(map, va, prot,
2071 VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
2072 goto error;
2073 }
2074 return (count);
2075 error:
2076 for (mp = ma; mp < ma + count; mp++)
2077 if (*mp != NULL)
2078 vm_page_unwire(*mp, PQ_INACTIVE);
2079 return (-1);
2080 }
2081
2082 /*
2083 * Routine:
2084 * vm_fault_copy_entry
2085 * Function:
2086 * Create a new object backing dst_entry, with a private copy of
2087 * all underlying pages. When src_entry is equal to dst_entry,
2088 * the function implements COW for a wired-down map entry.
2089 * Otherwise, it forks the wired entry into dst_map.
2090 *
2091 * In/out conditions:
2092 * The source and destination maps must be locked for write.
2093 * The source map entry must be wired down (or be a sharing map
2094 * entry corresponding to a main map entry that is wired down).
2095 */
2096 void
2097 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
2098 vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
2099 vm_ooffset_t *fork_charge)
2100 {
2101 struct pctrie_iter pages;
2102 vm_object_t backing_object, dst_object, object, src_object;
2103 vm_pindex_t dst_pindex, pindex, src_pindex;
2104 vm_prot_t access, prot;
2105 vm_offset_t vaddr;
2106 vm_page_t dst_m, mpred;
2107 vm_page_t src_m;
2108 bool upgrade;
2109
2110 upgrade = src_entry == dst_entry;
2111 KASSERT(upgrade || dst_entry->object.vm_object == NULL,
2112 ("vm_fault_copy_entry: vm_object not NULL"));
2113
2114 /*
2115 * If not an upgrade, then enter the mappings in the pmap as
2116 * read and/or execute accesses. Otherwise, enter them as
2117 * write accesses.
2118 *
2119 * A writeable large page mapping is only created if all of
2120 * the constituent small page mappings are modified. Marking
2121 * PTEs as modified on inception allows promotion to happen
2122 * without taking a potentially large number of soft faults.
2123 */
2124 access = prot = dst_entry->protection;
2125 if (!upgrade)
2126 access &= ~VM_PROT_WRITE;
2127
2128 src_object = src_entry->object.vm_object;
2129 src_pindex = OFF_TO_IDX(src_entry->offset);
2130
2131 if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2132 dst_object = src_object;
2133 vm_object_reference(dst_object);
2134 } else {
2135 /*
2136 * Create the top-level object for the destination entry.
2137 * Doesn't actually shadow anything - we copy the pages
2138 * directly.
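 * Because the source entry is wired, every source page is
 * resident and can be copied eagerly, so a shadow chain is
 * unnecessary here.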
2139 */ 2140 dst_object = vm_object_allocate_anon(atop(dst_entry->end - 2141 dst_entry->start), NULL, NULL, 0); 2142 #if VM_NRESERVLEVEL > 0 2143 dst_object->flags |= OBJ_COLORED; 2144 dst_object->pg_color = atop(dst_entry->start); 2145 #endif 2146 dst_object->domain = src_object->domain; 2147 dst_object->charge = dst_entry->end - dst_entry->start; 2148 2149 dst_entry->object.vm_object = dst_object; 2150 dst_entry->offset = 0; 2151 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC; 2152 } 2153 2154 VM_OBJECT_WLOCK(dst_object); 2155 if (fork_charge != NULL) { 2156 KASSERT(dst_entry->cred == NULL, 2157 ("vm_fault_copy_entry: leaked swp charge")); 2158 dst_object->cred = curthread->td_ucred; 2159 crhold(dst_object->cred); 2160 *fork_charge += dst_object->charge; 2161 } else if ((dst_object->flags & OBJ_SWAP) != 0 && 2162 dst_object->cred == NULL) { 2163 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 2164 dst_entry)); 2165 dst_object->cred = dst_entry->cred; 2166 dst_entry->cred = NULL; 2167 } 2168 2169 /* 2170 * Loop through all of the virtual pages within the entry's 2171 * range, copying each page from the source object to the 2172 * destination object. Since the source is wired, those pages 2173 * must exist. In contrast, the destination is pageable. 2174 * Since the destination object doesn't share any backing storage 2175 * with the source object, all of its pages must be dirtied, 2176 * regardless of whether they can be written. 2177 */ 2178 vm_page_iter_init(&pages, dst_object); 2179 mpred = (src_object == dst_object) ? 2180 vm_page_mpred(src_object, src_pindex) : NULL; 2181 for (vaddr = dst_entry->start, dst_pindex = 0; 2182 vaddr < dst_entry->end; 2183 vaddr += PAGE_SIZE, dst_pindex++, mpred = dst_m) { 2184 again: 2185 /* 2186 * Find the page in the source object, and copy it in. 2187 * Because the source is wired down, the page will be 2188 * in memory. 2189 */ 2190 if (src_object != dst_object) 2191 VM_OBJECT_RLOCK(src_object); 2192 object = src_object; 2193 pindex = src_pindex + dst_pindex; 2194 while ((src_m = vm_page_lookup(object, pindex)) == NULL && 2195 (backing_object = object->backing_object) != NULL) { 2196 /* 2197 * Unless the source mapping is read-only or 2198 * it is presently being upgraded from 2199 * read-only, the first object in the shadow 2200 * chain should provide all of the pages. In 2201 * other words, this loop body should never be 2202 * executed when the source mapping is already 2203 * read/write. 2204 */ 2205 KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || 2206 upgrade, 2207 ("vm_fault_copy_entry: main object missing page")); 2208 2209 VM_OBJECT_RLOCK(backing_object); 2210 pindex += OFF_TO_IDX(object->backing_object_offset); 2211 if (object != dst_object) 2212 VM_OBJECT_RUNLOCK(object); 2213 object = backing_object; 2214 } 2215 KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); 2216 2217 if (object != dst_object) { 2218 /* 2219 * Allocate a page in the destination object. 2220 */ 2221 pindex = (src_object == dst_object ? src_pindex : 0) + 2222 dst_pindex; 2223 dst_m = vm_page_alloc_after(dst_object, &pages, pindex, 2224 VM_ALLOC_NORMAL, mpred); 2225 if (dst_m == NULL) { 2226 VM_OBJECT_WUNLOCK(dst_object); 2227 VM_OBJECT_RUNLOCK(object); 2228 vm_wait(dst_object); 2229 VM_OBJECT_WLOCK(dst_object); 2230 pctrie_iter_reset(&pages); 2231 mpred = vm_radix_iter_lookup_lt(&pages, pindex); 2232 goto again; 2233 } 2234 2235 /* 2236 * See the comment in vm_fault_cow(). 
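 * In short: if the source object may be mapped by more than one
 * entry (OBJ_ONEMAPPING is clear), all existing mappings of the
 * source page are removed first, so that a concurrent write
 * through another mapping cannot race the copy and leave the
 * two copies inconsistent.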
2237 */
2238 if (src_object == dst_object &&
2239 (object->flags & OBJ_ONEMAPPING) == 0)
2240 pmap_remove_all(src_m);
2241 pmap_copy_page(src_m, dst_m);
2242
2243 /*
2244 * The object lock does not guarantee that "src_m" will
2245 * transition from invalid to valid, but it does ensure
2246 * that "src_m" will not transition from valid to
2247 * invalid.
2248 */
2249 dst_m->dirty = dst_m->valid = src_m->valid;
2250 VM_OBJECT_RUNLOCK(object);
2251 } else {
2252 dst_m = src_m;
2253 if (vm_page_busy_acquire(
2254 dst_m, VM_ALLOC_WAITFAIL) == 0) {
2255 pctrie_iter_reset(&pages);
2256 goto again;
2257 }
2258 if (dst_m->pindex >= dst_object->size) {
2259 /*
2260 * We are upgrading. The index can fall
2261 * out of bounds if the object is a
2262 * vnode object and the file was truncated.
2263 */
2264 vm_page_xunbusy(dst_m);
2265 break;
2266 }
2267 }
2268
2269 /*
2270 * Enter it in the pmap. If a wired, copy-on-write
2271 * mapping is being replaced by a write-enabled
2272 * mapping, then wire that new mapping.
2273 *
2274 * The page can be invalid if the user called
2275 * msync(MS_INVALIDATE) or truncated the backing vnode
2276 * or shared memory object. In this case, do not
2277 * insert it into pmap, but still do the copy so that
2278 * all copies of the wired map entry have similar
2279 * backing pages.
2280 */
2281 if (vm_page_all_valid(dst_m)) {
2282 VM_OBJECT_WUNLOCK(dst_object);
2283 pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
2284 access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
2285 VM_OBJECT_WLOCK(dst_object);
2286 }
2287
2288 /*
2289 * Mark it no longer busy, and put it on the active list.
2290 */
2291 if (upgrade) {
2292 if (src_m != dst_m) {
2293 vm_page_unwire(src_m, PQ_INACTIVE);
2294 vm_page_wire(dst_m);
2295 } else {
2296 KASSERT(vm_page_wired(dst_m),
2297 ("dst_m %p is not wired", dst_m));
2298 }
2299 } else {
2300 vm_page_activate(dst_m);
2301 }
2302 vm_page_xunbusy(dst_m);
2303 }
2304 VM_OBJECT_WUNLOCK(dst_object);
2305 if (upgrade) {
2306 dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
2307 vm_object_deallocate(src_object);
2308 }
2309 }
2310
2311 /*
2312 * Block entry into the machine-independent layer's page fault handler by
2313 * the calling thread. Subsequent calls to vm_fault() by that thread will
2314 * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of
2315 * spurious page faults.
2316 */
2317 int
2318 vm_fault_disable_pagefaults(void)
2319 {
2320
2321 return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
2322 }
2323
2324 void
2325 vm_fault_enable_pagefaults(int save)
2326 {
2327
2328 curthread_pflags_restore(save);
2329 }
2330
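
/*
 * Usage sketch (illustrative, not taken verbatim from a caller): a
 * thread that must not take page faults, e.g. because it holds MD VM
 * locks, brackets the unsafe region with the two functions above, in
 * the same way that copyin_nofault() wraps copyin():
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(udaddr, kaddr, len);
 *	vm_fault_enable_pagefaults(save);
 *
 * While TDP_NOFAULTING is set, a fault on a nonresident user page makes
 * vm_fault() return KERN_PROTECTION_FAILURE, so the copy fails with
 * EFAULT instead of sleeping for page-in I/O.
 */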