/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 1,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");
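
/*
 * Example: both knobs above are run-time tunables (CTLFLAG_RWTUN), so the
 * OOM back-off window can be adjusted without rebooting; the values below
 * are purely illustrative:
 *
 *	sysctl vm.pfault_oom_attempts=5
 *	sysctl vm.pfault_oom_wait=20
 *
 * With these settings, vm_fault_allocate_oom() below keeps retrying failed
 * page allocations for roughly 5 * 20 = 100 seconds before invoking
 * vm_pageout_oom().  Setting vm.pfault_oom_attempts to a negative value
 * prevents the fault handler from ever declaring OOM.
 */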
static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}
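
/*
 * Example: the MAP_ENTRY_NOSYNC handling above corresponds to mappings
 * created with mmap(2)'s MAP_NOSYNC flag, e.g.:
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_NOSYNC, fd, 0);
 *
 * Pages dirtied through such a mapping are tagged PGA_NOSYNC, so the
 * syncer will not proactively flush them; they are still written out by
 * msync(2) or ordinary pageout.
 */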
/*
 * Unlocks fs.first_object and fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind;
	vm_offset_t vaddr;
	enum fault_status res;

	MPASS(fs->vp == NULL);

	res = FAULT_SUCCESS;
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		res = FAULT_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
	    KERN_SUCCESS) {
		res = FAULT_FAILURE;
		goto out;
	}
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (res);
}
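
/*
 * Example of the superpage test in vm_fault_soft_fast(): on amd64, where
 * pagesizes[1] is assumed to be 2 MB, a faulting address can be mapped by
 * a 2 MB page only when the entire 2 MB-aligned virtual range lies inside
 * the map entry and the virtual and physical addresses are congruent
 * modulo 2 MB, i.e.
 *
 *	(vaddr & (2M - 1)) == (VM_PAGE_TO_PHYS(m) & (2M - 1))
 *
 * so that rounding both down to a 2 MB boundary yields a valid translation
 * for every constituent 4 KB page.
 */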
static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}
static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;
	enum fault_status res;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	res = FAULT_SUCCESS;

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (FAULT_RESTART);
		return (FAULT_CONTINUE);
	}
	if (rv != VM_PAGER_OK)
		return (FAULT_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (FAULT_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if (rv != KERN_SUCCESS) {
			res = FAULT_FAILURE;
			goto out;
		}
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;

		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;

		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);

		/*
		 * pmap_enter() may fail for a superpage mapping if additional
		 * protection policies prevent the full mapping.
		 * For example, this will happen on amd64 if the entire
		 * address range does not share the same userspace protection
		 * key.  Revert to single-page mappings if this happens.
		 */
		MPASS(rv == KERN_SUCCESS ||
		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
		if (__predict_false(psind > 0 &&
		    rv == KERN_PROTECTION_FAILURE)) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}

		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (res);
}
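
/*
 * Note on the populate() contract used above: pagers that set OBJ_POPULATE
 * (typically device pagers, e.g. DRM/GPU drivers providing a
 * cdev_pg_populate method) are expected to instantiate, validate, and
 * exclusively busy every page in the range they report back.  Returning
 * VM_PAGER_BAD makes vm_fault() fall back to the ordinary
 * lookup/allocate/getpages path instead.
 */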
static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}
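
/*
 * Summary of the machdep.prot_fault_translation values handled above:
 *
 *	0 - autodetect per process: SIGSEGV/SEGV_ACCERR for native FreeBSD
 *	    binaries built after P_OSREL_SIGSEGV, otherwise the historical
 *	    SIGBUS with the T_PAGEFLT code
 *	1 - always the historical SIGBUS
 *	2 - always SIGSEGV/SEGV_ACCERR
 *
 * For example, "sysctl machdep.prot_fault_translation=2" forces the
 * POSIX-style signal for all processes.
 */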
static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (FAULT_CONTINUE);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (FAULT_CONTINUE);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (FAULT_CONTINUE);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (FAULT_RESTART);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is
		 * "# of sequential faults x (read ahead min + 1)
		 * + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}
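
/*
 * Worked example of the read-ahead growth above, assuming
 * VM_FAULT_READ_AHEAD_MIN is 7: the first sequential fault requests 7
 * pages; each subsequent one computes nera = 7 + era + 1, so the window
 * grows 7, 15, 23, ... until it is clamped at VM_FAULT_READ_AHEAD_MAX,
 * after which drop-behind kicks in via vm_fault_dontneed().
 */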
static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	KASSERT(fs->object != fs->first_object,
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up.
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow object
		 * are read/write shared between this and other address spaces,
		 * we need to ensure that any pmap-level mappings to the
		 * original, copy-on-write page from the backing object are
		 * removed from those other address spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the current
		 * address space.  If OBJ_ONEMAPPING is set after the check,
		 * removing mappings will at worst trigger some unnecessary page
		 * faults.
		 */
		vm_page_assert_xbusied(fs->m_cow);
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}
static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}
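
/*
 * Counter semantics above: v_zfod counts every zero-fill-on-demand fault,
 * while v_ozfod counts the "optimized" subset whose page arrived with
 * PG_ZERO already set and therefore skipped pmap_zero_page().  Both are
 * visible in the output of "sysctl vm.stats.vm".
 */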
/*
 * Initiate OOM handling when a page allocation in the fault handler has
 * been failing for longer than the configured timeout.  Returns true if
 * the caller should do vm_waitpfault() after the call.
 */
static bool
vm_fault_allocate_oom(struct faultstate *fs)
{
	struct timeval now;

	unlock_and_deallocate(fs);
	if (vm_pfault_oom_attempts < 0)
		return (true);
	if (!fs->oom_started) {
		fs->oom_started = true;
		getmicrotime(&fs->oom_start_time);
		return (true);
	}

	getmicrotime(&now);
	timevalsub(&now, &fs->oom_start_time);
	if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait)
		return (true);

	if (bootverbose)
		printf(
	    "proc %d (%s) failed to alloc page on fault, starting OOM\n",
		    curproc->p_pid, curproc->p_comm);
	vm_pageout_oom(VM_OOM_MEM_PF);
	fs->oom_started = false;
	return (false);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static enum fault_status
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	enum fault_status res;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		res = vm_fault_lock_vnode(fs, true);
		MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
		if (res == FAULT_RESTART)
			return (res);
	}

	if (fs->pindex >= fs->object->size) {
		unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		res = vm_fault_populate(fs);
		switch (res) {
		case FAULT_SUCCESS:
		case FAULT_FAILURE:
		case FAULT_RESTART:
			unlock_and_deallocate(fs);
			return (res);
		case FAULT_CONTINUE:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * If the process has a fatal signal pending, prioritize the allocation
	 * with the expectation that the process will exit shortly and free some
	 * pages.  In particular, the signal may have been posted by the page
	 * daemon in an attempt to resolve an out-of-memory condition.
	 *
	 * The unlocked read of the p_flag is harmless.  At worst, the P_KILLED
	 * flag might not be observed here, and allocation fails, causing a
	 * restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		fs->m = vm_page_alloc(fs->object, fs->pindex,
		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0);
	}
	if (fs->m == NULL) {
		if (vm_fault_allocate_oom(fs))
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		return (FAULT_RESTART);
	}
	fs->oom_started = false;

	return (FAULT_CONTINUE);
}
/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static enum fault_status
vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	enum fault_status status;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * If the pager for the current object might have
	 * the page, then determine the number of additional
	 * pages to read and potentially reprioritize
	 * previously read pages for earlier reclamation.
	 * These operations should only be performed once per
	 * page fault.  Even if the current pager doesn't
	 * have the page, the number of additional pages to
	 * read will apply to subsequent objects in the
	 * shadow chain.
	 */
	if (fs->nera == -1 && !P_KILLED(curproc))
		fs->nera = vm_fault_readahead(fs);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	status = vm_fault_lock_vnode(fs, false);
	MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
	if (status == FAULT_RESTART)
		return (status);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring in surrounding pages.
	 */
	if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (fs->nera > 0) {
			behind = 0;
			ahead = fs->nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (FAULT_HARD);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		VM_OBJECT_WLOCK(fs->object);
		fault_page_free(&fs->m);
		unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}
	KASSERT(rv == VM_PAGER_FAIL,
	    ("%s: unexpected pager error %d", __func__, rv));
	return (FAULT_CONTINUE);
}
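
/*
 * Worked example of the clustering arithmetic above: with
 * VM_FAULT_READ_DEFAULT of 16 pages (1 + VM_FAULT_READ_AHEAD_INIT,
 * assuming an INIT of 15) and a non-sequential fault at pindex 37,
 * cluster_offset = 37 % 16 = 5, so up to 5 pages are read behind and
 * 16 - 1 - 5 = 10 pages ahead, producing one 16-page cluster aligned to a
 * 16-page boundary in the object ([32, 47]), subject to the map entry
 * bounds.
 */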
/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
	    !vm_page_busy_sleep(fs->m, "vmpfw", 0))
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}
/*
 * Handle page lookup, populate, allocate, page-in for the current
 * object.
 *
 * The object is locked on entry and will remain locked with a return
 * code of FAULT_CONTINUE so that fault may follow the shadow chain.
 * Otherwise, the object will be unlocked upon return.
 */
static enum fault_status
vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
{
	enum fault_status res;
	bool dead;

	/*
	 * If the object is marked for imminent termination, we retry
	 * here, since the collapse pass has raced with us.  Otherwise,
	 * if we see a terminally dead object, return failure.
	 */
	if ((fs->object->flags & OBJ_DEAD) != 0) {
		dead = fs->object->type == OBJT_DEAD;
		unlock_and_deallocate(fs);
		if (dead)
			return (FAULT_PROTECTION_FAILURE);
		pause("vmf_de", 1);
		return (FAULT_RESTART);
	}

	/*
	 * See if the page is resident.
	 */
	fs->m = vm_page_lookup(fs->object, fs->pindex);
	if (fs->m != NULL) {
		if (!vm_page_tryxbusy(fs->m)) {
			vm_fault_busy_sleep(fs);
			return (FAULT_RESTART);
		}

		/*
		 * The page is marked busy for other processes and the
		 * pagedaemon.  If it is still completely valid we are
		 * done.
		 */
		if (vm_page_all_valid(fs->m)) {
			VM_OBJECT_WUNLOCK(fs->object);
			return (FAULT_SOFT);
		}
	}
	VM_OBJECT_ASSERT_WLOCKED(fs->object);

	/*
	 * Page is not resident.  If the pager might contain the page
	 * or this is the beginning of the search, allocate a new
	 * page.  (Default objects are zero-fill, so there is no real
	 * pager for them.)
	 */
	if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
	    fs->object == fs->first_object)) {
		res = vm_fault_allocate(fs);
		if (res != FAULT_CONTINUE)
			return (res);
	}

	/*
	 * Default objects have no pager so no exclusive busy exists
	 * to protect this page in the chain.  Skip to the next
	 * object without dropping the lock to preserve atomicity of
	 * shadow faults.
	 */
	if (fs->object->type != OBJT_DEFAULT) {
		/*
		 * At this point, we have either allocated a new page
		 * or found an existing page that is only partially
		 * valid.
		 *
		 * We hold a reference on the current object and the
		 * page is exclusive busied.  The exclusive busy
		 * prevents simultaneous faults and collapses while
		 * the object lock is dropped.
		 */
		VM_OBJECT_WUNLOCK(fs->object);
		res = vm_fault_getpages(fs, behindp, aheadp);
		if (res == FAULT_CONTINUE)
			VM_OBJECT_WLOCK(fs->object);
	} else {
		res = FAULT_CONTINUE;
	}
	return (res);
}
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount, rv;
	enum fault_status res;
	bool hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom_started = false;
	fs.nera = -1;
	faultcount = 0;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	rv = vm_fault_lookup(&fs);
	if (rv != KERN_SUCCESS) {
		if (rv == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (rv);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		res = vm_fault_soft_fast(&fs);
		if (res == FAULT_SUCCESS)
			return (KERN_SUCCESS);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		res = vm_fault_allocate(&fs);
		switch (res) {
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));

		res = vm_fault_object(&fs, &behind, &ahead);
		switch (res) {
		case FAULT_SOFT:
			goto found;
		case FAULT_HARD:
			faultcount = behind + 1 + ahead;
			hardfault = true;
			goto found;
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_PROTECTION_FAILURE:
			return (KERN_PROTECTION_FAILURE);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				fault_page_free(&fs.first_m);
			unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break;
	}

found:
	/*
	 * A valid page has been found and exclusively busied.  The
	 * object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		rv = vm_fault_relookup(&fs);
		if (rv != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (rv == KERN_RESTART)
				goto RetryFault;
			return (rv);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}
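
/*
 * Example of the sequential-access bookkeeping above: after a hard fault
 * at vaddr that read 7 pages ahead, next_read is set to
 * vaddr + ptoa(7) + PAGE_SIZE, i.e. the first page past the cluster just
 * read.  If the next fault on this entry lands exactly there,
 * vm_fault_readahead() treats it as sequential and grows the read-ahead
 * window.
 */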
/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_UNLOCKED(object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}
/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}
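
/*
 * Example: with the default PFBAK/PFFOR of 4, a soft fault at address "a"
 * probes up to eight candidates in the order a-1p, a+1p, a-2p, a+2p,
 * a-3p, a+3p, a-4p, a+4p (p = PAGE_SIZE), skipping candidates outside the
 * entry and stopping at the first one with no resident backing page.
 */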
/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers mark
		 * too wide a code area with vm_fault_disable_pagefaults()
		 * already; such callers use the VM_PROT_QUICK_NOFAULT flag
		 * to request the proper behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}
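
/*
 * Usage sketch (hypothetical caller; "uaddr" and "len" are placeholders):
 * a driver that needs a user buffer wired for I/O might write
 *
 *	vm_page_t pages[16];
 *	int n;
 *
 *	n = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    (vm_offset_t)uaddr, len, VM_PROT_READ | VM_PROT_WRITE,
 *	    pages, nitems(pages));
 *	if (n == -1)
 *		return (EFAULT);
 *	... perform I/O on pages[0..n-1] ...
 *	vm_page_unhold_pages(pages, n);
 *
 * This mirrors how physio-style code wires down user buffers.
 */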
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called, i.e.,
		 * TDP_NOFAULTING is set, we must not sleep or acquire MD VM
		 * locks, which means we must not call vm_fault().  Some (out
		 * of tree) callers already mark too wide a code area with
		 * vm_fault_disable_pagefaults(); such callers should use the
		 * VM_PROT_QUICK_NOFAULT flag to request the proper behaviour
		 * explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}
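/*
 * A minimal usage sketch (names such as "uva", "len", and the array size are
 * assumptions for illustration; the pattern mirrors in-tree consumers such
 * as proc_rwmem()): hold the pages backing a user buffer, operate on them,
 * and release them with vm_page_unhold_pages().
 *
 *	vm_page_t ma[4];
 *	int count;
 *
 *	count = vm_fault_quick_hold_pages(map, uva, len,
 *	    VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
 *	if (count == -1)
 *		return (EFAULT);
 *	... access the held physical pages through ma[0 .. count - 1] ...
 *	vm_page_unhold_pages(ma, count);
 */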
/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create a new shadow object backing dst_entry with a private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry, the
 *	function implements COW for a wired-down map entry.  Otherwise,
 *	it forks a wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    (dst_object->flags & OBJ_SWAP) != 0) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can go
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
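/*
 * For orientation, a hedged sketch of the two call patterns (the exact call
 * sites live outside this file, so treat the forms below as assumptions):
 * the upgrade mode is reached when write access is granted to a wired,
 * copy-on-write entry, roughly as
 *
 *	vm_fault_copy_entry(map, map, entry, entry, NULL);
 *
 * while forking a wired entry passes distinct source and destination
 * entries together with a non-NULL fork_charge:
 *
 *	vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
 *	    fork_charge);
 */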
/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
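/*
 * A minimal usage sketch (illustrative; the variable names are assumptions,
 * but the save/restore pattern matches in-tree users such as vn_io_fault()):
 * bracket code that must not enter the fault handler, then fall back to a
 * faulting path on EFAULT.
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = uiomove(kbuf, len, uio);
 *	vm_fault_enable_pagefaults(save);
 *	if (error == EFAULT)
 *		... retry via a path that may take page faults ...
 */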