/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Page fault handling module.
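 *
 * Roughly: machine-dependent trap code calls vm_fault_trap(), which
 * translates the Mach-style result of vm_fault() into a signal and si_code.
 * vm_fault() looks up the faulting address in the map, first tries the
 * vm_fault_soft_fast() path (page already resident and valid in the
 * top-level object, read lock only), and otherwise walks the shadow chain
 * via vm_fault_object()/vm_fault_next(), calling the pager, zero-filling,
 * or copying for copy-on-write as needed, before entering the result into
 * the pmap with pmap_enter().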
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 1,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

/*
 * Return true if a vm_pager_get_pages() call is needed in order to check
 * whether the pager might have a particular page, false if it can be
 * determined immediately that the pager can not have a copy.  For swap
 * objects, this can be checked quickly.
 */
static inline bool
fault_object_needs_getpages(vm_object_t object)
{
	VM_OBJECT_ASSERT_LOCKED(object);

	return ((object->flags & OBJ_SWAP) == 0 ||
	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}

}

/*
 * Unlocks fs.first_object and fs.map on success.
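 *
 * In short: this is the fast path, entered with only a read lock on
 * fs.first_object.  It succeeds only when the page is already resident and
 * fully valid in that object (and not busied, if write access is needed);
 * the page is then entered into the pmap, possibly as a superpage.  Any
 * other condition returns FAULT_FAILURE and the caller falls back to the
 * regular fault path.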
330 */ 331 static enum fault_status 332 vm_fault_soft_fast(struct faultstate *fs) 333 { 334 vm_page_t m, m_map; 335 #if VM_NRESERVLEVEL > 0 336 vm_page_t m_super; 337 int flags; 338 #endif 339 int psind; 340 vm_offset_t vaddr; 341 342 MPASS(fs->vp == NULL); 343 344 vaddr = fs->vaddr; 345 vm_object_busy(fs->first_object); 346 m = vm_page_lookup(fs->first_object, fs->first_pindex); 347 /* A busy page can be mapped for read|execute access. */ 348 if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 && 349 vm_page_busied(m)) || !vm_page_all_valid(m)) 350 goto fail; 351 m_map = m; 352 psind = 0; 353 #if VM_NRESERVLEVEL > 0 354 if ((m->flags & PG_FICTITIOUS) == 0 && 355 (m_super = vm_reserv_to_superpage(m)) != NULL && 356 rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start && 357 roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end && 358 (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) & 359 (pagesizes[m_super->psind] - 1)) && !fs->wired && 360 pmap_ps_enabled(fs->map->pmap)) { 361 flags = PS_ALL_VALID; 362 if ((fs->prot & VM_PROT_WRITE) != 0) { 363 /* 364 * Create a superpage mapping allowing write access 365 * only if none of the constituent pages are busy and 366 * all of them are already dirty (except possibly for 367 * the page that was faulted on). 368 */ 369 flags |= PS_NONE_BUSY; 370 if ((fs->first_object->flags & OBJ_UNMANAGED) == 0) 371 flags |= PS_ALL_DIRTY; 372 } 373 if (vm_page_ps_test(m_super, flags, m)) { 374 m_map = m_super; 375 psind = m_super->psind; 376 vaddr = rounddown2(vaddr, pagesizes[psind]); 377 /* Preset the modified bit for dirty superpages. */ 378 if ((flags & PS_ALL_DIRTY) != 0) 379 fs->fault_type |= VM_PROT_WRITE; 380 } 381 } 382 #endif 383 if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type | 384 PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) != 385 KERN_SUCCESS) 386 goto fail; 387 if (fs->m_hold != NULL) { 388 (*fs->m_hold) = m; 389 vm_page_wire(m); 390 } 391 if (psind == 0 && !fs->wired) 392 vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true); 393 VM_OBJECT_RUNLOCK(fs->first_object); 394 vm_fault_dirty(fs, m); 395 vm_object_unbusy(fs->first_object); 396 vm_map_lookup_done(fs->map, fs->entry); 397 curthread->td_ru.ru_minflt++; 398 return (FAULT_SUCCESS); 399 fail: 400 vm_object_unbusy(fs->first_object); 401 return (FAULT_FAILURE); 402 } 403 404 static void 405 vm_fault_restore_map_lock(struct faultstate *fs) 406 { 407 408 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 409 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 410 411 if (!vm_map_trylock_read(fs->map)) { 412 VM_OBJECT_WUNLOCK(fs->first_object); 413 vm_map_lock_read(fs->map); 414 VM_OBJECT_WLOCK(fs->first_object); 415 } 416 fs->lookup_still_valid = true; 417 } 418 419 static void 420 vm_fault_populate_check_page(vm_page_t m) 421 { 422 423 /* 424 * Check each page to ensure that the pager is obeying the 425 * interface: the page must be installed in the object, fully 426 * valid, and exclusively busied. 
427 */ 428 MPASS(m != NULL); 429 MPASS(vm_page_all_valid(m)); 430 MPASS(vm_page_xbusied(m)); 431 } 432 433 static void 434 vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first, 435 vm_pindex_t last) 436 { 437 vm_page_t m; 438 vm_pindex_t pidx; 439 440 VM_OBJECT_ASSERT_WLOCKED(object); 441 MPASS(first <= last); 442 for (pidx = first, m = vm_page_lookup(object, pidx); 443 pidx <= last; pidx++, m = vm_page_next(m)) { 444 vm_fault_populate_check_page(m); 445 vm_page_deactivate(m); 446 vm_page_xunbusy(m); 447 } 448 } 449 450 static enum fault_status 451 vm_fault_populate(struct faultstate *fs) 452 { 453 vm_offset_t vaddr; 454 vm_page_t m; 455 vm_pindex_t map_first, map_last, pager_first, pager_last, pidx; 456 int bdry_idx, i, npages, psind, rv; 457 enum fault_status res; 458 459 MPASS(fs->object == fs->first_object); 460 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 461 MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 462 MPASS(fs->first_object->backing_object == NULL); 463 MPASS(fs->lookup_still_valid); 464 465 pager_first = OFF_TO_IDX(fs->entry->offset); 466 pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1; 467 unlock_map(fs); 468 unlock_vp(fs); 469 470 res = FAULT_SUCCESS; 471 472 /* 473 * Call the pager (driver) populate() method. 474 * 475 * There is no guarantee that the method will be called again 476 * if the current fault is for read, and a future fault is 477 * for write. Report the entry's maximum allowed protection 478 * to the driver. 479 */ 480 rv = vm_pager_populate(fs->first_object, fs->first_pindex, 481 fs->fault_type, fs->entry->max_protection, &pager_first, 482 &pager_last); 483 484 VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 485 if (rv == VM_PAGER_BAD) { 486 /* 487 * VM_PAGER_BAD is the backdoor for a pager to request 488 * normal fault handling. 489 */ 490 vm_fault_restore_map_lock(fs); 491 if (fs->map->timestamp != fs->map_generation) 492 return (FAULT_RESTART); 493 return (FAULT_CONTINUE); 494 } 495 if (rv != VM_PAGER_OK) 496 return (FAULT_FAILURE); /* AKA SIGSEGV */ 497 498 /* Ensure that the driver is obeying the interface. */ 499 MPASS(pager_first <= pager_last); 500 MPASS(fs->first_pindex <= pager_last); 501 MPASS(fs->first_pindex >= pager_first); 502 MPASS(pager_last < fs->first_object->size); 503 504 vm_fault_restore_map_lock(fs); 505 bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> 506 MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 507 if (fs->map->timestamp != fs->map_generation) { 508 if (bdry_idx == 0) { 509 vm_fault_populate_cleanup(fs->first_object, pager_first, 510 pager_last); 511 } else { 512 m = vm_page_lookup(fs->first_object, pager_first); 513 if (m != fs->m) 514 vm_page_xunbusy(m); 515 } 516 return (FAULT_RESTART); 517 } 518 519 /* 520 * The map is unchanged after our last unlock. Process the fault. 521 * 522 * First, the special case of largepage mappings, where 523 * populate only busies the first page in superpage run. 
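	 * The boundary index decoded above from MAP_ENTRY_SPLIT_BOUNDARY_MASK
	 * selects the pagesizes[] entry used for the mapping; the single
	 * busied page is checked and then entered with PMAP_ENTER_LARGEPAGE.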
524 */ 525 if (bdry_idx != 0) { 526 KASSERT(PMAP_HAS_LARGEPAGES, 527 ("missing pmap support for large pages")); 528 m = vm_page_lookup(fs->first_object, pager_first); 529 vm_fault_populate_check_page(m); 530 VM_OBJECT_WUNLOCK(fs->first_object); 531 vaddr = fs->entry->start + IDX_TO_OFF(pager_first) - 532 fs->entry->offset; 533 /* assert alignment for entry */ 534 KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0, 535 ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx", 536 (uintmax_t)fs->entry->start, (uintmax_t)pager_first, 537 (uintmax_t)fs->entry->offset, (uintmax_t)vaddr)); 538 KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0, 539 ("unaligned superpage m %p %#jx", m, 540 (uintmax_t)VM_PAGE_TO_PHYS(m))); 541 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, 542 fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) | 543 PMAP_ENTER_LARGEPAGE, bdry_idx); 544 VM_OBJECT_WLOCK(fs->first_object); 545 vm_page_xunbusy(m); 546 if (rv != KERN_SUCCESS) { 547 res = FAULT_FAILURE; 548 goto out; 549 } 550 if ((fs->fault_flags & VM_FAULT_WIRE) != 0) { 551 for (i = 0; i < atop(pagesizes[bdry_idx]); i++) 552 vm_page_wire(m + i); 553 } 554 if (fs->m_hold != NULL) { 555 *fs->m_hold = m + (fs->first_pindex - pager_first); 556 vm_page_wire(*fs->m_hold); 557 } 558 goto out; 559 } 560 561 /* 562 * The range [pager_first, pager_last] that is given to the 563 * pager is only a hint. The pager may populate any range 564 * within the object that includes the requested page index. 565 * In case the pager expanded the range, clip it to fit into 566 * the map entry. 567 */ 568 map_first = OFF_TO_IDX(fs->entry->offset); 569 if (map_first > pager_first) { 570 vm_fault_populate_cleanup(fs->first_object, pager_first, 571 map_first - 1); 572 pager_first = map_first; 573 } 574 map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1; 575 if (map_last < pager_last) { 576 vm_fault_populate_cleanup(fs->first_object, map_last + 1, 577 pager_last); 578 pager_last = map_last; 579 } 580 for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx); 581 pidx <= pager_last; 582 pidx += npages, m = vm_page_next(&m[npages - 1])) { 583 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset; 584 585 psind = m->psind; 586 if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 || 587 pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last || 588 !pmap_ps_enabled(fs->map->pmap) || fs->wired)) 589 psind = 0; 590 591 npages = atop(pagesizes[psind]); 592 for (i = 0; i < npages; i++) { 593 vm_fault_populate_check_page(&m[i]); 594 vm_fault_dirty(fs, &m[i]); 595 } 596 VM_OBJECT_WUNLOCK(fs->first_object); 597 rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | 598 (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 599 600 /* 601 * pmap_enter() may fail for a superpage mapping if additional 602 * protection policies prevent the full mapping. 603 * For example, this will happen on amd64 if the entire 604 * address range does not share the same userspace protection 605 * key. Revert to single-page mappings if this happens. 
606 */ 607 MPASS(rv == KERN_SUCCESS || 608 (psind > 0 && rv == KERN_PROTECTION_FAILURE)); 609 if (__predict_false(psind > 0 && 610 rv == KERN_PROTECTION_FAILURE)) { 611 MPASS(!fs->wired); 612 for (i = 0; i < npages; i++) { 613 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), 614 &m[i], fs->prot, fs->fault_type, 0); 615 MPASS(rv == KERN_SUCCESS); 616 } 617 } 618 619 VM_OBJECT_WLOCK(fs->first_object); 620 for (i = 0; i < npages; i++) { 621 if ((fs->fault_flags & VM_FAULT_WIRE) != 0 && 622 m[i].pindex == fs->first_pindex) 623 vm_page_wire(&m[i]); 624 else 625 vm_page_activate(&m[i]); 626 if (fs->m_hold != NULL && 627 m[i].pindex == fs->first_pindex) { 628 (*fs->m_hold) = &m[i]; 629 vm_page_wire(&m[i]); 630 } 631 vm_page_xunbusy(&m[i]); 632 } 633 } 634 out: 635 curthread->td_ru.ru_majflt++; 636 return (res); 637 } 638 639 static int prot_fault_translation; 640 SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN, 641 &prot_fault_translation, 0, 642 "Control signal to deliver on protection fault"); 643 644 /* compat definition to keep common code for signal translation */ 645 #define UCODE_PAGEFLT 12 646 #ifdef T_PAGEFLT 647 _Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT"); 648 #endif 649 650 /* 651 * vm_fault_trap: 652 * 653 * Handle a page fault occurring at the given address, 654 * requiring the given permissions, in the map specified. 655 * If successful, the page is inserted into the 656 * associated physical map. 657 * 658 * NOTE: the given address should be truncated to the 659 * proper page address. 660 * 661 * KERN_SUCCESS is returned if the page fault is handled; otherwise, 662 * a standard error specifying why the fault is fatal is returned. 663 * 664 * The map in question must be referenced, and remains so. 665 * Caller may hold no locks. 666 */ 667 int 668 vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 669 int fault_flags, int *signo, int *ucode) 670 { 671 int result; 672 673 MPASS(signo == NULL || ucode != NULL); 674 #ifdef KTRACE 675 if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT)) 676 ktrfault(vaddr, fault_type); 677 #endif 678 result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags, 679 NULL); 680 KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE || 681 result == KERN_INVALID_ADDRESS || 682 result == KERN_RESOURCE_SHORTAGE || 683 result == KERN_PROTECTION_FAILURE || 684 result == KERN_OUT_OF_BOUNDS, 685 ("Unexpected Mach error %d from vm_fault()", result)); 686 #ifdef KTRACE 687 if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND)) 688 ktrfaultend(result); 689 #endif 690 if (result != KERN_SUCCESS && signo != NULL) { 691 switch (result) { 692 case KERN_FAILURE: 693 case KERN_INVALID_ADDRESS: 694 *signo = SIGSEGV; 695 *ucode = SEGV_MAPERR; 696 break; 697 case KERN_RESOURCE_SHORTAGE: 698 *signo = SIGBUS; 699 *ucode = BUS_OOMERR; 700 break; 701 case KERN_OUT_OF_BOUNDS: 702 *signo = SIGBUS; 703 *ucode = BUS_OBJERR; 704 break; 705 case KERN_PROTECTION_FAILURE: 706 if (prot_fault_translation == 0) { 707 /* 708 * Autodetect. This check also covers 709 * the images without the ABI-tag ELF 710 * note. 711 */ 712 if (SV_CURPROC_ABI() == SV_ABI_FREEBSD && 713 curproc->p_osrel >= P_OSREL_SIGSEGV) { 714 *signo = SIGSEGV; 715 *ucode = SEGV_ACCERR; 716 } else { 717 *signo = SIGBUS; 718 *ucode = UCODE_PAGEFLT; 719 } 720 } else if (prot_fault_translation == 1) { 721 /* Always compat mode. */ 722 *signo = SIGBUS; 723 *ucode = UCODE_PAGEFLT; 724 } else { 725 /* Always SIGSEGV mode. 
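				 * Any value other than 0 or 1, e.g.
				 * "sysctl machdep.prot_fault_translation=2",
				 * selects this branch: SIGSEGV with
				 * SEGV_ACCERR regardless of the ABI note.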
*/ 726 *signo = SIGSEGV; 727 *ucode = SEGV_ACCERR; 728 } 729 break; 730 default: 731 KASSERT(0, ("Unexpected Mach error %d from vm_fault()", 732 result)); 733 break; 734 } 735 } 736 return (result); 737 } 738 739 static enum fault_status 740 vm_fault_lock_vnode(struct faultstate *fs, bool objlocked) 741 { 742 struct vnode *vp; 743 int error, locked; 744 745 if (fs->object->type != OBJT_VNODE) 746 return (FAULT_CONTINUE); 747 vp = fs->object->handle; 748 if (vp == fs->vp) { 749 ASSERT_VOP_LOCKED(vp, "saved vnode is not locked"); 750 return (FAULT_CONTINUE); 751 } 752 753 /* 754 * Perform an unlock in case the desired vnode changed while 755 * the map was unlocked during a retry. 756 */ 757 unlock_vp(fs); 758 759 locked = VOP_ISLOCKED(vp); 760 if (locked != LK_EXCLUSIVE) 761 locked = LK_SHARED; 762 763 /* 764 * We must not sleep acquiring the vnode lock while we have 765 * the page exclusive busied or the object's 766 * paging-in-progress count incremented. Otherwise, we could 767 * deadlock. 768 */ 769 error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT); 770 if (error == 0) { 771 fs->vp = vp; 772 return (FAULT_CONTINUE); 773 } 774 775 vhold(vp); 776 if (objlocked) 777 unlock_and_deallocate(fs); 778 else 779 fault_deallocate(fs); 780 error = vget(vp, locked | LK_RETRY | LK_CANRECURSE); 781 vdrop(vp); 782 fs->vp = vp; 783 KASSERT(error == 0, ("vm_fault: vget failed %d", error)); 784 return (FAULT_RESTART); 785 } 786 787 /* 788 * Calculate the desired readahead. Handle drop-behind. 789 * 790 * Returns the number of readahead blocks to pass to the pager. 791 */ 792 static int 793 vm_fault_readahead(struct faultstate *fs) 794 { 795 int era, nera; 796 u_char behavior; 797 798 KASSERT(fs->lookup_still_valid, ("map unlocked")); 799 era = fs->entry->read_ahead; 800 behavior = vm_map_entry_behavior(fs->entry); 801 if (behavior == MAP_ENTRY_BEHAV_RANDOM) { 802 nera = 0; 803 } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) { 804 nera = VM_FAULT_READ_AHEAD_MAX; 805 if (fs->vaddr == fs->entry->next_read) 806 vm_fault_dontneed(fs, fs->vaddr, nera); 807 } else if (fs->vaddr == fs->entry->next_read) { 808 /* 809 * This is a sequential fault. Arithmetically 810 * increase the requested number of pages in 811 * the read-ahead window. The requested 812 * number of pages is "# of sequential faults 813 * x (read ahead min + 1) + read ahead min" 814 */ 815 nera = VM_FAULT_READ_AHEAD_MIN; 816 if (era > 0) { 817 nera += era + 1; 818 if (nera > VM_FAULT_READ_AHEAD_MAX) 819 nera = VM_FAULT_READ_AHEAD_MAX; 820 } 821 if (era == VM_FAULT_READ_AHEAD_MAX) 822 vm_fault_dontneed(fs, fs->vaddr, nera); 823 } else { 824 /* 825 * This is a non-sequential fault. 826 */ 827 nera = 0; 828 } 829 if (era != nera) { 830 /* 831 * A read lock on the map suffices to update 832 * the read ahead count safely. 
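		 * (With the arithmetic above, successive sequential faults
		 * grow the window as MIN, 2 * MIN + 1, 3 * MIN + 2, ...,
		 * where MIN is VM_FAULT_READ_AHEAD_MIN, until it is clamped
		 * at VM_FAULT_READ_AHEAD_MAX.)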
833 */ 834 fs->entry->read_ahead = nera; 835 } 836 837 return (nera); 838 } 839 840 static int 841 vm_fault_lookup(struct faultstate *fs) 842 { 843 int result; 844 845 KASSERT(!fs->lookup_still_valid, 846 ("vm_fault_lookup: Map already locked.")); 847 result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type | 848 VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object, 849 &fs->first_pindex, &fs->prot, &fs->wired); 850 if (result != KERN_SUCCESS) { 851 unlock_vp(fs); 852 return (result); 853 } 854 855 fs->map_generation = fs->map->timestamp; 856 857 if (fs->entry->eflags & MAP_ENTRY_NOFAULT) { 858 panic("%s: fault on nofault entry, addr: %#lx", 859 __func__, (u_long)fs->vaddr); 860 } 861 862 if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION && 863 fs->entry->wiring_thread != curthread) { 864 vm_map_unlock_read(fs->map); 865 vm_map_lock(fs->map); 866 if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) && 867 (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) { 868 unlock_vp(fs); 869 fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 870 vm_map_unlock_and_wait(fs->map, 0); 871 } else 872 vm_map_unlock(fs->map); 873 return (KERN_RESOURCE_SHORTAGE); 874 } 875 876 MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0); 877 878 if (fs->wired) 879 fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY); 880 else 881 KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0, 882 ("!fs->wired && VM_FAULT_WIRE")); 883 fs->lookup_still_valid = true; 884 885 return (KERN_SUCCESS); 886 } 887 888 static int 889 vm_fault_relookup(struct faultstate *fs) 890 { 891 vm_object_t retry_object; 892 vm_pindex_t retry_pindex; 893 vm_prot_t retry_prot; 894 int result; 895 896 if (!vm_map_trylock_read(fs->map)) 897 return (KERN_RESTART); 898 899 fs->lookup_still_valid = true; 900 if (fs->map->timestamp == fs->map_generation) 901 return (KERN_SUCCESS); 902 903 result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type, 904 &fs->entry, &retry_object, &retry_pindex, &retry_prot, 905 &fs->wired); 906 if (result != KERN_SUCCESS) { 907 /* 908 * If retry of map lookup would have blocked then 909 * retry fault from start. 910 */ 911 if (result == KERN_FAILURE) 912 return (KERN_RESTART); 913 return (result); 914 } 915 if (retry_object != fs->first_object || 916 retry_pindex != fs->first_pindex) 917 return (KERN_RESTART); 918 919 /* 920 * Check whether the protection has changed or the object has 921 * been copied while we left the map unlocked. Changing from 922 * read to write permission is OK - we leave the page 923 * write-protected, and catch the write fault. Changing from 924 * write to read permission means that we can't mark the page 925 * write-enabled after all. 926 */ 927 fs->prot &= retry_prot; 928 fs->fault_type &= retry_prot; 929 if (fs->prot == 0) 930 return (KERN_RESTART); 931 932 /* Reassert because wired may have changed. */ 933 KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0, 934 ("!wired && VM_FAULT_WIRE")); 935 936 return (KERN_SUCCESS); 937 } 938 939 static void 940 vm_fault_cow(struct faultstate *fs) 941 { 942 bool is_first_object_locked; 943 944 KASSERT(fs->object != fs->first_object, 945 ("source and target COW objects are identical")); 946 947 /* 948 * This allows pages to be virtually copied from a backing_object 949 * into the first_object, where the backing object has no other 950 * refs to it, and cannot gain any more refs. Instead of a bcopy, 951 * we just move the page from the backing object to the first 952 * object. 
Note that we must mark the page dirty in the first 953 * object so that it will go out to swap when needed. 954 */ 955 is_first_object_locked = false; 956 if ( 957 /* 958 * Only one shadow object and no other refs. 959 */ 960 fs->object->shadow_count == 1 && fs->object->ref_count == 1 && 961 /* 962 * No other ways to look the object up 963 */ 964 fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 && 965 /* 966 * We don't chase down the shadow chain and we can acquire locks. 967 */ 968 (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) && 969 fs->object == fs->first_object->backing_object && 970 VM_OBJECT_TRYWLOCK(fs->object)) { 971 /* 972 * Remove but keep xbusy for replace. fs->m is moved into 973 * fs->first_object and left busy while fs->first_m is 974 * conditionally freed. 975 */ 976 vm_page_remove_xbusy(fs->m); 977 vm_page_replace(fs->m, fs->first_object, fs->first_pindex, 978 fs->first_m); 979 vm_page_dirty(fs->m); 980 #if VM_NRESERVLEVEL > 0 981 /* 982 * Rename the reservation. 983 */ 984 vm_reserv_rename(fs->m, fs->first_object, fs->object, 985 OFF_TO_IDX(fs->first_object->backing_object_offset)); 986 #endif 987 VM_OBJECT_WUNLOCK(fs->object); 988 VM_OBJECT_WUNLOCK(fs->first_object); 989 fs->first_m = fs->m; 990 fs->m = NULL; 991 VM_CNT_INC(v_cow_optim); 992 } else { 993 if (is_first_object_locked) 994 VM_OBJECT_WUNLOCK(fs->first_object); 995 /* 996 * Oh, well, lets copy it. 997 */ 998 pmap_copy_page(fs->m, fs->first_m); 999 vm_page_valid(fs->first_m); 1000 if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) { 1001 vm_page_wire(fs->first_m); 1002 vm_page_unwire(fs->m, PQ_INACTIVE); 1003 } 1004 /* 1005 * Save the cow page to be released after 1006 * pmap_enter is complete. 1007 */ 1008 fs->m_cow = fs->m; 1009 fs->m = NULL; 1010 1011 /* 1012 * Typically, the shadow object is either private to this 1013 * address space (OBJ_ONEMAPPING) or its pages are read only. 1014 * In the highly unusual case where the pages of a shadow object 1015 * are read/write shared between this and other address spaces, 1016 * we need to ensure that any pmap-level mappings to the 1017 * original, copy-on-write page from the backing object are 1018 * removed from those other address spaces. 1019 * 1020 * The flag check is racy, but this is tolerable: if 1021 * OBJ_ONEMAPPING is cleared after the check, the busy state 1022 * ensures that new mappings of m_cow can't be created. 1023 * pmap_enter() will replace an existing mapping in the current 1024 * address space. If OBJ_ONEMAPPING is set after the check, 1025 * removing mappings will at worse trigger some unnecessary page 1026 * faults. 1027 */ 1028 vm_page_assert_xbusied(fs->m_cow); 1029 if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0) 1030 pmap_remove_all(fs->m_cow); 1031 } 1032 1033 vm_object_pip_wakeup(fs->object); 1034 1035 /* 1036 * Only use the new page below... 1037 */ 1038 fs->object = fs->first_object; 1039 fs->pindex = fs->first_pindex; 1040 fs->m = fs->first_m; 1041 VM_CNT_INC(v_cow_faults); 1042 curthread->td_cow++; 1043 } 1044 1045 static bool 1046 vm_fault_next(struct faultstate *fs) 1047 { 1048 vm_object_t next_object; 1049 1050 /* 1051 * The requested page does not exist at this object/ 1052 * offset. Remove the invalid page from the object, 1053 * waking up anyone waiting for it, and continue on to 1054 * the next object. 
However, if this is the top-level 1055 * object, we must leave the busy page in place to 1056 * prevent another process from rushing past us, and 1057 * inserting the page in that object at the same time 1058 * that we are. 1059 */ 1060 if (fs->object == fs->first_object) { 1061 fs->first_m = fs->m; 1062 fs->m = NULL; 1063 } else 1064 fault_page_free(&fs->m); 1065 1066 /* 1067 * Move on to the next object. Lock the next object before 1068 * unlocking the current one. 1069 */ 1070 VM_OBJECT_ASSERT_WLOCKED(fs->object); 1071 next_object = fs->object->backing_object; 1072 if (next_object == NULL) 1073 return (false); 1074 MPASS(fs->first_m != NULL); 1075 KASSERT(fs->object != next_object, ("object loop %p", next_object)); 1076 VM_OBJECT_WLOCK(next_object); 1077 vm_object_pip_add(next_object, 1); 1078 if (fs->object != fs->first_object) 1079 vm_object_pip_wakeup(fs->object); 1080 fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset); 1081 VM_OBJECT_WUNLOCK(fs->object); 1082 fs->object = next_object; 1083 1084 return (true); 1085 } 1086 1087 static void 1088 vm_fault_zerofill(struct faultstate *fs) 1089 { 1090 1091 /* 1092 * If there's no object left, fill the page in the top 1093 * object with zeros. 1094 */ 1095 if (fs->object != fs->first_object) { 1096 vm_object_pip_wakeup(fs->object); 1097 fs->object = fs->first_object; 1098 fs->pindex = fs->first_pindex; 1099 } 1100 MPASS(fs->first_m != NULL); 1101 MPASS(fs->m == NULL); 1102 fs->m = fs->first_m; 1103 fs->first_m = NULL; 1104 1105 /* 1106 * Zero the page if necessary and mark it valid. 1107 */ 1108 if ((fs->m->flags & PG_ZERO) == 0) { 1109 pmap_zero_page(fs->m); 1110 } else { 1111 VM_CNT_INC(v_ozfod); 1112 } 1113 VM_CNT_INC(v_zfod); 1114 vm_page_valid(fs->m); 1115 } 1116 1117 /* 1118 * Initiate page fault after timeout. Returns true if caller should 1119 * do vm_waitpfault() after the call. 1120 */ 1121 static bool 1122 vm_fault_allocate_oom(struct faultstate *fs) 1123 { 1124 struct timeval now; 1125 1126 unlock_and_deallocate(fs); 1127 if (vm_pfault_oom_attempts < 0) 1128 return (true); 1129 if (!fs->oom_started) { 1130 fs->oom_started = true; 1131 getmicrotime(&fs->oom_start_time); 1132 return (true); 1133 } 1134 1135 getmicrotime(&now); 1136 timevalsub(&now, &fs->oom_start_time); 1137 if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait) 1138 return (true); 1139 1140 if (bootverbose) 1141 printf( 1142 "proc %d (%s) failed to alloc page on fault, starting OOM\n", 1143 curproc->p_pid, curproc->p_comm); 1144 vm_pageout_oom(VM_OOM_MEM_PF); 1145 fs->oom_started = false; 1146 return (false); 1147 } 1148 1149 /* 1150 * Allocate a page directly or via the object populate method. 
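 *
 * Returns FAULT_OUT_OF_BOUNDS when the pindex lies beyond the object's size,
 * FAULT_RESTART when the fault must be retried (vnode lock, failed
 * allocation, or a changed map), FAULT_FAILURE on a populate or allocation
 * error, FAULT_SUCCESS when the populate method completed the fault, and
 * FAULT_CONTINUE when a page was allocated (or populate declined) and the
 * caller should keep going.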
1151 */ 1152 static enum fault_status 1153 vm_fault_allocate(struct faultstate *fs) 1154 { 1155 struct domainset *dset; 1156 enum fault_status res; 1157 1158 if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) { 1159 res = vm_fault_lock_vnode(fs, true); 1160 MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART); 1161 if (res == FAULT_RESTART) 1162 return (res); 1163 } 1164 1165 if (fs->pindex >= fs->object->size) { 1166 unlock_and_deallocate(fs); 1167 return (FAULT_OUT_OF_BOUNDS); 1168 } 1169 1170 if (fs->object == fs->first_object && 1171 (fs->first_object->flags & OBJ_POPULATE) != 0 && 1172 fs->first_object->shadow_count == 0) { 1173 res = vm_fault_populate(fs); 1174 switch (res) { 1175 case FAULT_SUCCESS: 1176 case FAULT_FAILURE: 1177 case FAULT_RESTART: 1178 unlock_and_deallocate(fs); 1179 return (res); 1180 case FAULT_CONTINUE: 1181 /* 1182 * Pager's populate() method 1183 * returned VM_PAGER_BAD. 1184 */ 1185 break; 1186 default: 1187 panic("inconsistent return codes"); 1188 } 1189 } 1190 1191 /* 1192 * Allocate a new page for this object/offset pair. 1193 * 1194 * If the process has a fatal signal pending, prioritize the allocation 1195 * with the expectation that the process will exit shortly and free some 1196 * pages. In particular, the signal may have been posted by the page 1197 * daemon in an attempt to resolve an out-of-memory condition. 1198 * 1199 * The unlocked read of the p_flag is harmless. At worst, the P_KILLED 1200 * might be not observed here, and allocation fails, causing a restart 1201 * and new reading of the p_flag. 1202 */ 1203 dset = fs->object->domain.dr_policy; 1204 if (dset == NULL) 1205 dset = curthread->td_domain.dr_policy; 1206 if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) { 1207 #if VM_NRESERVLEVEL > 0 1208 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex); 1209 #endif 1210 if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) { 1211 unlock_and_deallocate(fs); 1212 return (FAULT_FAILURE); 1213 } 1214 fs->m = vm_page_alloc(fs->object, fs->pindex, 1215 P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0); 1216 } 1217 if (fs->m == NULL) { 1218 if (vm_fault_allocate_oom(fs)) 1219 vm_waitpfault(dset, vm_pfault_oom_wait * hz); 1220 return (FAULT_RESTART); 1221 } 1222 fs->oom_started = false; 1223 1224 return (FAULT_CONTINUE); 1225 } 1226 1227 /* 1228 * Call the pager to retrieve the page if there is a chance 1229 * that the pager has it, and potentially retrieve additional 1230 * pages at the same time. 1231 */ 1232 static enum fault_status 1233 vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp) 1234 { 1235 vm_offset_t e_end, e_start; 1236 int ahead, behind, cluster_offset, rv; 1237 enum fault_status status; 1238 u_char behavior; 1239 1240 /* 1241 * Prepare for unlocking the map. Save the map 1242 * entry's start and end addresses, which are used to 1243 * optimize the size of the pager operation below. 1244 * Even if the map entry's addresses change after 1245 * unlocking the map, using the saved addresses is 1246 * safe. 1247 */ 1248 e_start = fs->entry->start; 1249 e_end = fs->entry->end; 1250 behavior = vm_map_entry_behavior(fs->entry); 1251 1252 /* 1253 * If the pager for the current object might have 1254 * the page, then determine the number of additional 1255 * pages to read and potentially reprioritize 1256 * previously read pages for earlier reclamation. 1257 * These operations should only be performed once per 1258 * page fault. 
	 * Even if the current pager doesn't
	 * have the page, the number of additional pages to
	 * read will apply to subsequent objects in the
	 * shadow chain.
	 */
	if (fs->nera == -1 && !P_KILLED(curproc))
		fs->nera = vm_fault_readahead(fs);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	status = vm_fault_lock_vnode(fs, false);
	MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
	if (status == FAULT_RESTART)
		return (status);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring up surrounding pages.
	 */
	if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (fs->nera > 0) {
			behind = 0;
			ahead = fs->nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (FAULT_HARD);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		VM_OBJECT_WLOCK(fs->object);
		fault_page_free(&fs->m);
		unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}
	KASSERT(rv == VM_PAGER_FAIL,
	    ("%s: unexpected pager error %d", __func__, rv));
	return (FAULT_CONTINUE);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
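	 * (vm_page_busy_sleep() is expected to release the object lock when
	 * it actually sleeps, which is why the explicit unlock below is
	 * performed only when no sleep occurred or the page changed
	 * identity.)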
1356 */ 1357 vm_page_aflag_set(fs->m, PGA_REFERENCED); 1358 if (fs->object != fs->first_object) { 1359 fault_page_release(&fs->first_m); 1360 vm_object_pip_wakeup(fs->first_object); 1361 } 1362 vm_object_pip_wakeup(fs->object); 1363 unlock_map(fs); 1364 if (fs->m != vm_page_lookup(fs->object, fs->pindex) || 1365 !vm_page_busy_sleep(fs->m, "vmpfw", 0)) 1366 VM_OBJECT_WUNLOCK(fs->object); 1367 VM_CNT_INC(v_intrans); 1368 vm_object_deallocate(fs->first_object); 1369 } 1370 1371 /* 1372 * Handle page lookup, populate, allocate, page-in for the current 1373 * object. 1374 * 1375 * The object is locked on entry and will remain locked with a return 1376 * code of FAULT_CONTINUE so that fault may follow the shadow chain. 1377 * Otherwise, the object will be unlocked upon return. 1378 */ 1379 static enum fault_status 1380 vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp) 1381 { 1382 enum fault_status res; 1383 bool dead; 1384 1385 /* 1386 * If the object is marked for imminent termination, we retry 1387 * here, since the collapse pass has raced with us. Otherwise, 1388 * if we see terminally dead object, return fail. 1389 */ 1390 if ((fs->object->flags & OBJ_DEAD) != 0) { 1391 dead = fs->object->type == OBJT_DEAD; 1392 unlock_and_deallocate(fs); 1393 if (dead) 1394 return (FAULT_PROTECTION_FAILURE); 1395 pause("vmf_de", 1); 1396 return (FAULT_RESTART); 1397 } 1398 1399 /* 1400 * See if the page is resident. 1401 */ 1402 fs->m = vm_page_lookup(fs->object, fs->pindex); 1403 if (fs->m != NULL) { 1404 if (!vm_page_tryxbusy(fs->m)) { 1405 vm_fault_busy_sleep(fs); 1406 return (FAULT_RESTART); 1407 } 1408 1409 /* 1410 * The page is marked busy for other processes and the 1411 * pagedaemon. If it is still completely valid we are 1412 * done. 1413 */ 1414 if (vm_page_all_valid(fs->m)) { 1415 VM_OBJECT_WUNLOCK(fs->object); 1416 return (FAULT_SOFT); 1417 } 1418 } 1419 VM_OBJECT_ASSERT_WLOCKED(fs->object); 1420 1421 /* 1422 * Page is not resident. If the pager might contain the page 1423 * or this is the beginning of the search, allocate a new 1424 * page. 1425 */ 1426 if (fs->m == NULL && (fault_object_needs_getpages(fs->object) || 1427 fs->object == fs->first_object)) { 1428 res = vm_fault_allocate(fs); 1429 if (res != FAULT_CONTINUE) 1430 return (res); 1431 } 1432 1433 /* 1434 * Default objects have no pager so no exclusive busy exists 1435 * to protect this page in the chain. Skip to the next 1436 * object without dropping the lock to preserve atomicity of 1437 * shadow faults. 1438 */ 1439 if (fault_object_needs_getpages(fs->object)) { 1440 /* 1441 * At this point, we have either allocated a new page 1442 * or found an existing page that is only partially 1443 * valid. 1444 * 1445 * We hold a reference on the current object and the 1446 * page is exclusive busied. The exclusive busy 1447 * prevents simultaneous faults and collapses while 1448 * the object lock is dropped. 
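		 * If the pager read succeeds, vm_fault_getpages() returns
		 * FAULT_HARD with the object left unlocked; only on
		 * FAULT_CONTINUE is the object lock reacquired below so that
		 * the shadow chain walk can continue.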
1449 */ 1450 VM_OBJECT_WUNLOCK(fs->object); 1451 res = vm_fault_getpages(fs, behindp, aheadp); 1452 if (res == FAULT_CONTINUE) 1453 VM_OBJECT_WLOCK(fs->object); 1454 } else { 1455 res = FAULT_CONTINUE; 1456 } 1457 return (res); 1458 } 1459 1460 int 1461 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1462 int fault_flags, vm_page_t *m_hold) 1463 { 1464 struct faultstate fs; 1465 int ahead, behind, faultcount, rv; 1466 enum fault_status res; 1467 bool hardfault; 1468 1469 VM_CNT_INC(v_vm_faults); 1470 1471 if ((curthread->td_pflags & TDP_NOFAULTING) != 0) 1472 return (KERN_PROTECTION_FAILURE); 1473 1474 fs.vp = NULL; 1475 fs.vaddr = vaddr; 1476 fs.m_hold = m_hold; 1477 fs.fault_flags = fault_flags; 1478 fs.map = map; 1479 fs.lookup_still_valid = false; 1480 fs.oom_started = false; 1481 fs.nera = -1; 1482 faultcount = 0; 1483 hardfault = false; 1484 1485 RetryFault: 1486 fs.fault_type = fault_type; 1487 1488 /* 1489 * Find the backing store object and offset into it to begin the 1490 * search. 1491 */ 1492 rv = vm_fault_lookup(&fs); 1493 if (rv != KERN_SUCCESS) { 1494 if (rv == KERN_RESOURCE_SHORTAGE) 1495 goto RetryFault; 1496 return (rv); 1497 } 1498 1499 /* 1500 * Try to avoid lock contention on the top-level object through 1501 * special-case handling of some types of page faults, specifically, 1502 * those that are mapping an existing page from the top-level object. 1503 * Under this condition, a read lock on the object suffices, allowing 1504 * multiple page faults of a similar type to run in parallel. 1505 */ 1506 if (fs.vp == NULL /* avoid locked vnode leak */ && 1507 (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 && 1508 (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) { 1509 VM_OBJECT_RLOCK(fs.first_object); 1510 res = vm_fault_soft_fast(&fs); 1511 if (res == FAULT_SUCCESS) 1512 return (KERN_SUCCESS); 1513 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { 1514 VM_OBJECT_RUNLOCK(fs.first_object); 1515 VM_OBJECT_WLOCK(fs.first_object); 1516 } 1517 } else { 1518 VM_OBJECT_WLOCK(fs.first_object); 1519 } 1520 1521 /* 1522 * Make a reference to this object to prevent its disposal while we 1523 * are messing with it. Once we have the reference, the map is free 1524 * to be diddled. Since objects reference their shadows (and copies), 1525 * they will stay around as well. 1526 * 1527 * Bump the paging-in-progress count to prevent size changes (e.g. 1528 * truncation operations) during I/O. 1529 */ 1530 vm_object_reference_locked(fs.first_object); 1531 vm_object_pip_add(fs.first_object, 1); 1532 1533 fs.m_cow = fs.m = fs.first_m = NULL; 1534 1535 /* 1536 * Search for the page at object/offset. 
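	 * The loop below walks the shadow chain: vm_fault_object() handles
	 * lookup, allocation, and pager I/O for the current object, and
	 * vm_fault_next() advances to the backing object; if no object in
	 * the chain provides the page, it is zero-filled in the top-level
	 * object by vm_fault_zerofill().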
1537 */ 1538 fs.object = fs.first_object; 1539 fs.pindex = fs.first_pindex; 1540 1541 if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) { 1542 res = vm_fault_allocate(&fs); 1543 switch (res) { 1544 case FAULT_RESTART: 1545 goto RetryFault; 1546 case FAULT_SUCCESS: 1547 return (KERN_SUCCESS); 1548 case FAULT_FAILURE: 1549 return (KERN_FAILURE); 1550 case FAULT_OUT_OF_BOUNDS: 1551 return (KERN_OUT_OF_BOUNDS); 1552 case FAULT_CONTINUE: 1553 break; 1554 default: 1555 panic("vm_fault: Unhandled status %d", res); 1556 } 1557 } 1558 1559 while (TRUE) { 1560 KASSERT(fs.m == NULL, 1561 ("page still set %p at loop start", fs.m)); 1562 1563 res = vm_fault_object(&fs, &behind, &ahead); 1564 switch (res) { 1565 case FAULT_SOFT: 1566 goto found; 1567 case FAULT_HARD: 1568 faultcount = behind + 1 + ahead; 1569 hardfault = true; 1570 goto found; 1571 case FAULT_RESTART: 1572 goto RetryFault; 1573 case FAULT_SUCCESS: 1574 return (KERN_SUCCESS); 1575 case FAULT_FAILURE: 1576 return (KERN_FAILURE); 1577 case FAULT_OUT_OF_BOUNDS: 1578 return (KERN_OUT_OF_BOUNDS); 1579 case FAULT_PROTECTION_FAILURE: 1580 return (KERN_PROTECTION_FAILURE); 1581 case FAULT_CONTINUE: 1582 break; 1583 default: 1584 panic("vm_fault: Unhandled status %d", res); 1585 } 1586 1587 /* 1588 * The page was not found in the current object. Try to 1589 * traverse into a backing object or zero fill if none is 1590 * found. 1591 */ 1592 if (vm_fault_next(&fs)) 1593 continue; 1594 if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) { 1595 if (fs.first_object == fs.object) 1596 fault_page_free(&fs.first_m); 1597 unlock_and_deallocate(&fs); 1598 return (KERN_OUT_OF_BOUNDS); 1599 } 1600 VM_OBJECT_WUNLOCK(fs.object); 1601 vm_fault_zerofill(&fs); 1602 /* Don't try to prefault neighboring pages. */ 1603 faultcount = 1; 1604 break; 1605 } 1606 1607 found: 1608 /* 1609 * A valid page has been found and exclusively busied. The 1610 * object lock must no longer be held. 1611 */ 1612 vm_page_assert_xbusied(fs.m); 1613 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1614 1615 /* 1616 * If the page is being written, but isn't already owned by the 1617 * top-level object, we have to copy it into a new page owned by the 1618 * top-level object. 1619 */ 1620 if (fs.object != fs.first_object) { 1621 /* 1622 * We only really need to copy if we want to write it. 1623 */ 1624 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1625 vm_fault_cow(&fs); 1626 /* 1627 * We only try to prefault read-only mappings to the 1628 * neighboring pages when this copy-on-write fault is 1629 * a hard fault. In other cases, trying to prefault 1630 * is typically wasted effort. 1631 */ 1632 if (faultcount == 0) 1633 faultcount = 1; 1634 1635 } else { 1636 fs.prot &= ~VM_PROT_WRITE; 1637 } 1638 } 1639 1640 /* 1641 * We must verify that the maps have not changed since our last 1642 * lookup. 1643 */ 1644 if (!fs.lookup_still_valid) { 1645 rv = vm_fault_relookup(&fs); 1646 if (rv != KERN_SUCCESS) { 1647 fault_deallocate(&fs); 1648 if (rv == KERN_RESTART) 1649 goto RetryFault; 1650 return (rv); 1651 } 1652 } 1653 VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1654 1655 /* 1656 * If the page was filled by a pager, save the virtual address that 1657 * should be faulted on next under a sequential access pattern to the 1658 * map entry. A read lock on the map suffices to update this address 1659 * safely. 1660 */ 1661 if (hardfault) 1662 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1663 1664 /* 1665 * Page must be completely valid or it is not fit to 1666 * map into user space. 
vm_pager_get_pages() ensures this. 1667 */ 1668 vm_page_assert_xbusied(fs.m); 1669 KASSERT(vm_page_all_valid(fs.m), 1670 ("vm_fault: page %p partially invalid", fs.m)); 1671 1672 vm_fault_dirty(&fs, fs.m); 1673 1674 /* 1675 * Put this page into the physical map. We had to do the unlock above 1676 * because pmap_enter() may sleep. We don't put the page 1677 * back on the active queue until later so that the pageout daemon 1678 * won't find it (yet). 1679 */ 1680 pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, 1681 fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); 1682 if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && 1683 fs.wired == 0) 1684 vm_fault_prefault(&fs, vaddr, 1685 faultcount > 0 ? behind : PFBAK, 1686 faultcount > 0 ? ahead : PFFOR, false); 1687 1688 /* 1689 * If the page is not wired down, then put it where the pageout daemon 1690 * can find it. 1691 */ 1692 if ((fs.fault_flags & VM_FAULT_WIRE) != 0) 1693 vm_page_wire(fs.m); 1694 else 1695 vm_page_activate(fs.m); 1696 if (fs.m_hold != NULL) { 1697 (*fs.m_hold) = fs.m; 1698 vm_page_wire(fs.m); 1699 } 1700 vm_page_xunbusy(fs.m); 1701 fs.m = NULL; 1702 1703 /* 1704 * Unlock everything, and return 1705 */ 1706 fault_deallocate(&fs); 1707 if (hardfault) { 1708 VM_CNT_INC(v_io_faults); 1709 curthread->td_ru.ru_majflt++; 1710 #ifdef RACCT 1711 if (racct_enable && fs.object->type == OBJT_VNODE) { 1712 PROC_LOCK(curproc); 1713 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1714 racct_add_force(curproc, RACCT_WRITEBPS, 1715 PAGE_SIZE + behind * PAGE_SIZE); 1716 racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1717 } else { 1718 racct_add_force(curproc, RACCT_READBPS, 1719 PAGE_SIZE + ahead * PAGE_SIZE); 1720 racct_add_force(curproc, RACCT_READIOPS, 1); 1721 } 1722 PROC_UNLOCK(curproc); 1723 } 1724 #endif 1725 } else 1726 curthread->td_ru.ru_minflt++; 1727 1728 return (KERN_SUCCESS); 1729 } 1730 1731 /* 1732 * Speed up the reclamation of pages that precede the faulting pindex within 1733 * the first object of the shadow chain. Essentially, perform the equivalent 1734 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1735 * the faulting pindex by the cluster size when the pages read by vm_fault() 1736 * cross a cluster-size boundary. The cluster size is the greater of the 1737 * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1738 * 1739 * When "fs->first_object" is a shadow object, the pages in the backing object 1740 * that precede the faulting pindex are deactivated by vm_fault(). So, this 1741 * function must only be concerned with pages in the first object. 1742 */ 1743 static void 1744 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 1745 { 1746 vm_map_entry_t entry; 1747 vm_object_t first_object; 1748 vm_offset_t end, start; 1749 vm_page_t m, m_next; 1750 vm_pindex_t pend, pstart; 1751 vm_size_t size; 1752 1753 VM_OBJECT_ASSERT_UNLOCKED(fs->object); 1754 first_object = fs->first_object; 1755 /* Neither fictitious nor unmanaged pages can be reclaimed. 
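	 * (The reclamation cluster computed below is the larger of
	 * VM_FAULT_DONTNEED_MIN and the smallest superpage size,
	 * pagesizes[1], when superpages are configured.)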
*/ 1756 if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { 1757 VM_OBJECT_RLOCK(first_object); 1758 size = VM_FAULT_DONTNEED_MIN; 1759 if (MAXPAGESIZES > 1 && size < pagesizes[1]) 1760 size = pagesizes[1]; 1761 end = rounddown2(vaddr, size); 1762 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && 1763 (entry = fs->entry)->start < end) { 1764 if (end - entry->start < size) 1765 start = entry->start; 1766 else 1767 start = end - size; 1768 pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); 1769 pstart = OFF_TO_IDX(entry->offset) + atop(start - 1770 entry->start); 1771 m_next = vm_page_find_least(first_object, pstart); 1772 pend = OFF_TO_IDX(entry->offset) + atop(end - 1773 entry->start); 1774 while ((m = m_next) != NULL && m->pindex < pend) { 1775 m_next = TAILQ_NEXT(m, listq); 1776 if (!vm_page_all_valid(m) || 1777 vm_page_busied(m)) 1778 continue; 1779 1780 /* 1781 * Don't clear PGA_REFERENCED, since it would 1782 * likely represent a reference by a different 1783 * process. 1784 * 1785 * Typically, at this point, prefetched pages 1786 * are still in the inactive queue. Only 1787 * pages that triggered page faults are in the 1788 * active queue. The test for whether the page 1789 * is in the inactive queue is racy; in the 1790 * worst case we will requeue the page 1791 * unnecessarily. 1792 */ 1793 if (!vm_page_inactive(m)) 1794 vm_page_deactivate(m); 1795 } 1796 } 1797 VM_OBJECT_RUNLOCK(first_object); 1798 } 1799 } 1800 1801 /* 1802 * vm_fault_prefault provides a quick way of clustering 1803 * pagefaults into a processes address space. It is a "cousin" 1804 * of vm_map_pmap_enter, except it runs at page fault time instead 1805 * of mmap time. 1806 */ 1807 static void 1808 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 1809 int backward, int forward, bool obj_locked) 1810 { 1811 pmap_t pmap; 1812 vm_map_entry_t entry; 1813 vm_object_t backing_object, lobject; 1814 vm_offset_t addr, starta; 1815 vm_pindex_t pindex; 1816 vm_page_t m; 1817 int i; 1818 1819 pmap = fs->map->pmap; 1820 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) 1821 return; 1822 1823 entry = fs->entry; 1824 1825 if (addra < backward * PAGE_SIZE) { 1826 starta = entry->start; 1827 } else { 1828 starta = addra - backward * PAGE_SIZE; 1829 if (starta < entry->start) 1830 starta = entry->start; 1831 } 1832 1833 /* 1834 * Generate the sequence of virtual addresses that are candidates for 1835 * prefaulting in an outward spiral from the faulting virtual address, 1836 * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra 1837 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... 1838 * If the candidate address doesn't have a backing physical page, then 1839 * the loop immediately terminates. 1840 */ 1841 for (i = 0; i < 2 * imax(backward, forward); i++) { 1842 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? 
/*
 * vm_fault_prefault provides a quick way of clustering
 * page faults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    !fault_object_needs_getpages(lobject) &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

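/*
 * Illustrative note (derived from the comment above, not original text):
 * with backward = PFBAK = 4 and forward = PFFOR = 4, the loop above visits
 * the candidates
 *
 *	addra - 1 page, addra + 1 page, addra - 2 pages, addra + 2 pages,
 *	addra - 3 pages, addra + 3 pages, addra - 4 pages, addra + 4 pages
 *
 * and installs a mapping with pmap_enter_quick() only for candidates that
 * already have a resident, fully valid page; no I/O is ever initiated here.
 */
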
/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out-of-tree) callers already mark
		 * too wide a region of code with
		 * vm_fault_disable_pagefaults(); such callers should use
		 * the VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}

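/*
 * Usage sketch (illustrative only; the array size and error handling are
 * assumptions, not taken from this file): a caller that needs a short
 * user-space range resident and wired for the duration of an operation
 * might do
 *
 *	vm_page_t ma[16];
 *	int count;
 *
 *	count = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    uva, len, VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
 *	if (count == -1)
 *		return (EFAULT);
 *	(access the pages, e.g. through temporary kernel mappings)
 *	vm_page_unhold_pages(ma, count);
 *
 * where vm_page_unhold_pages() releases the wirings acquired above.
 */
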
/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create a new object backing dst_entry with a private copy of all
 *	underlying pages.  When src_entry is equal to dst_entry, the function
 *	implements COW for a wired-down map entry.  Otherwise, it forks the
 *	wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	bool upgrade;

	upgrade = src_entry == dst_entry;
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	access = prot = dst_entry->protection;
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;

		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}

	VM_OBJECT_WLOCK(dst_object);
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->flags & OBJ_SWAP) != 0 &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}

			/*
			 * See the comment in vm_fault_cow().
			 */
			if (src_object == dst_object &&
			    (object->flags & OBJ_ONEMAPPING) == 0)
				pmap_remove_all(src_m);
			pmap_copy_page(src_m, dst_m);

			/*
			 * The object lock does not guarantee that "src_m" will
			 * transition from invalid to valid, but it does ensure
			 * that "src_m" will not transition from valid to
			 * invalid.
			 */
			dst_m->dirty = dst_m->valid = src_m->valid;
			VM_OBJECT_RUNLOCK(object);
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can fall
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			VM_OBJECT_WUNLOCK(dst_object);
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
			VM_OBJECT_WLOCK(dst_object);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
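
/*
 * Usage sketch (illustrative; not taken from this file): code that must
 * touch pageable memory from a context where taking a page fault would be
 * unsafe brackets the access with the pair above and falls back to a
 * slower path on failure:
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(uaddr, kbuf, len);
 *	vm_fault_enable_pagefaults(save);
 *	if (error != 0)
 *		(retry the operation from a context that may fault)
 *
 * With TDP_NOFAULTING set, a fault taken by copyin() is expected to fail
 * with EFAULT rather than sleep in vm_fault().  The value returned by
 * vm_fault_disable_pagefaults() must be passed back to
 * vm_fault_enable_pagefaults() so that nested uses restore the previous
 * thread flags.
 */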