/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 1,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

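/*
 * Release a page that was exclusively busied during fault handling:
 * deactivate it, drop the busy lock, and clear the caller's pointer.
 */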
static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}

}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind;
	vm_offset_t vaddr;
	enum fault_status res;

	MPASS(fs->vp == NULL);

	res = FAULT_SUCCESS;
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		res = FAULT_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
	    KERN_SUCCESS) {
		res = FAULT_FAILURE;
		goto out;
	}
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (res);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}

static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;
	enum fault_status res;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	res = FAULT_SUCCESS;

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (FAULT_RESTART);
		return (FAULT_CONTINUE);
	}
	if (rv != VM_PAGER_OK)
		return (FAULT_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (FAULT_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if (rv != KERN_SUCCESS) {
			res = FAULT_FAILURE;
			goto out;
		}
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;

		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;

		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);

		/*
		 * pmap_enter() may fail for a superpage mapping if additional
		 * protection policies prevent the full mapping.
		 * For example, this will happen on amd64 if the entire
		 * address range does not share the same userspace protection
		 * key.  Revert to single-page mappings if this happens.
		 */
		MPASS(rv == KERN_SUCCESS ||
		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
		if (__predict_false(psind > 0 &&
		    rv == KERN_PROTECTION_FAILURE)) {
			MPASS(!fs->wired);
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type, 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}

		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
			    m[i].pindex == fs->first_pindex)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL &&
			    m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (res);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (FAULT_CONTINUE);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (FAULT_CONTINUE);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (FAULT_CONTINUE);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (FAULT_RESTART);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	KASSERT(fs->object != fs->first_object,
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow object
		 * are read/write shared between this and other address spaces,
		 * we need to ensure that any pmap-level mappings to the
		 * original, copy-on-write page from the backing object are
		 * removed from those other address spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the current
		 * address space.  If OBJ_ONEMAPPING is set after the check,
		 * removing mappings will at worst trigger some unnecessary page
		 * faults.
		 */
		vm_page_assert_xbusied(fs->m_cow);
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}

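/*
 * Advance the fault state from the current object to its backing object,
 * if any.  Returns false when the end of the shadow chain has been
 * reached, in which case the caller zero-fills the page in the top-level
 * object instead.
 */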
static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}

/*
 * Initiate page fault after timeout.  Returns true if caller should
 * do vm_waitpfault() after the call.
 */
static bool
vm_fault_allocate_oom(struct faultstate *fs)
{
	struct timeval now;

	unlock_and_deallocate(fs);
	if (vm_pfault_oom_attempts < 0)
		return (true);
	if (!fs->oom_started) {
		fs->oom_started = true;
		getmicrotime(&fs->oom_start_time);
		return (true);
	}

	getmicrotime(&now);
	timevalsub(&now, &fs->oom_start_time);
	if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait)
		return (true);

	if (bootverbose)
		printf(
	    "proc %d (%s) failed to alloc page on fault, starting OOM\n",
		    curproc->p_pid, curproc->p_comm);
	vm_pageout_oom(VM_OOM_MEM_PF);
	fs->oom_started = false;
	return (false);
}

/*
 * Allocate a page directly or via the object populate method.
 */
static enum fault_status
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	enum fault_status res;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		res = vm_fault_lock_vnode(fs, true);
		MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
		if (res == FAULT_RESTART)
			return (res);
	}

	if (fs->pindex >= fs->object->size) {
		unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		res = vm_fault_populate(fs);
		switch (res) {
		case FAULT_SUCCESS:
		case FAULT_FAILURE:
		case FAULT_RESTART:
			unlock_and_deallocate(fs);
			return (res);
		case FAULT_CONTINUE:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * If the process has a fatal signal pending, prioritize the allocation
	 * with the expectation that the process will exit shortly and free some
	 * pages.  In particular, the signal may have been posted by the page
	 * daemon in an attempt to resolve an out-of-memory condition.
	 *
	 * The unlocked read of the p_flag is harmless.  At worst, the P_KILLED
	 * might not be observed here, and allocation fails, causing a restart
	 * and new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		fs->m = vm_page_alloc(fs->object, fs->pindex,
		    P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0);
	}
	if (fs->m == NULL) {
		if (vm_fault_allocate_oom(fs))
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		return (FAULT_RESTART);
	}
	fs->oom_started = false;

	return (FAULT_CONTINUE);
}

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static enum fault_status
vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	enum fault_status status;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * If the pager for the current object might have
	 * the page, then determine the number of additional
	 * pages to read and potentially reprioritize
	 * previously read pages for earlier reclamation.
	 * These operations should only be performed once per
	 * page fault.  Even if the current pager doesn't
	 * have the page, the number of additional pages to
	 * read will apply to subsequent objects in the
	 * shadow chain.
	 */
	if (fs->nera == -1 && !P_KILLED(curproc))
		fs->nera = vm_fault_readahead(fs);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	status = vm_fault_lock_vnode(fs, false);
	MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
	if (status == FAULT_RESTART)
		return (status);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager,
	 * that it may bring up surrounding pages.
	 */
	if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (fs->nera > 0) {
			behind = 0;
			ahead = fs->nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (FAULT_HARD);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		VM_OBJECT_WLOCK(fs->object);
		fault_page_free(&fs->m);
		unlock_and_deallocate(fs);
		return (FAULT_OUT_OF_BOUNDS);
	}
	KASSERT(rv == VM_PAGER_FAIL,
	    ("%s: unexpected pager error %d", __func__, rv));
	return (FAULT_CONTINUE);
}

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
	    !vm_page_busy_sleep(fs->m, "vmpfw", 0))
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}

/*
 * Handle page lookup, populate, allocate, page-in for the current
 * object.
 *
 * The object is locked on entry and will remain locked with a return
 * code of FAULT_CONTINUE so that fault may follow the shadow chain.
 * Otherwise, the object will be unlocked upon return.
 */
static enum fault_status
vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
{
	enum fault_status res;
	bool dead;

	/*
	 * If the object is marked for imminent termination, we retry
	 * here, since the collapse pass has raced with us.  Otherwise,
	 * if we see terminally dead object, return fail.
	 */
	if ((fs->object->flags & OBJ_DEAD) != 0) {
		dead = fs->object->type == OBJT_DEAD;
		unlock_and_deallocate(fs);
		if (dead)
			return (FAULT_PROTECTION_FAILURE);
		pause("vmf_de", 1);
		return (FAULT_RESTART);
	}

	/*
	 * See if the page is resident.
	 */
	fs->m = vm_page_lookup(fs->object, fs->pindex);
	if (fs->m != NULL) {
		if (!vm_page_tryxbusy(fs->m)) {
			vm_fault_busy_sleep(fs);
			return (FAULT_RESTART);
		}

		/*
		 * The page is marked busy for other processes and the
		 * pagedaemon.  If it is still completely valid we are
		 * done.
		 */
		if (vm_page_all_valid(fs->m)) {
			VM_OBJECT_WUNLOCK(fs->object);
			return (FAULT_SOFT);
		}
	}
	VM_OBJECT_ASSERT_WLOCKED(fs->object);

	/*
	 * Page is not resident.  If the pager might contain the page
	 * or this is the beginning of the search, allocate a new
	 * page.  (Default objects are zero-fill, so there is no real
	 * pager for them.)
	 */
	if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
	    fs->object == fs->first_object)) {
		res = vm_fault_allocate(fs);
		if (res != FAULT_CONTINUE)
			return (res);
	}

	/*
	 * Default objects have no pager so no exclusive busy exists
	 * to protect this page in the chain.  Skip to the next
	 * object without dropping the lock to preserve atomicity of
	 * shadow faults.
	 */
	if (fs->object->type != OBJT_DEFAULT) {
		/*
		 * At this point, we have either allocated a new page
		 * or found an existing page that is only partially
		 * valid.
		 *
		 * We hold a reference on the current object and the
		 * page is exclusive busied.  The exclusive busy
		 * prevents simultaneous faults and collapses while
		 * the object lock is dropped.
		 */
		VM_OBJECT_WUNLOCK(fs->object);
		res = vm_fault_getpages(fs, behindp, aheadp);
		if (res == FAULT_CONTINUE)
			VM_OBJECT_WLOCK(fs->object);
	} else {
		res = FAULT_CONTINUE;
	}
	return (res);
}

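/*
 *	vm_fault:
 *
 *	Handle a page fault at the given address in the given map.
 *	This is the common entry point used by vm_fault_trap() and by
 *	callers such as vm_fault_quick_hold_pages() that pass "m_hold"
 *	to obtain a wired reference to the resulting page.
 *
 *	Returns a KERN_* status code.
 */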
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount, rv;
	enum fault_status res;
	bool hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom_started = false;
	fs.nera = -1;
	faultcount = 0;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	rv = vm_fault_lookup(&fs);
	if (rv != KERN_SUCCESS) {
		if (rv == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (rv);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		res = vm_fault_soft_fast(&fs);
		if (res == FAULT_SUCCESS)
			return (KERN_SUCCESS);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		res = vm_fault_allocate(&fs);
		switch (res) {
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));

		res = vm_fault_object(&fs, &behind, &ahead);
		switch (res) {
		case FAULT_SOFT:
			goto found;
		case FAULT_HARD:
			faultcount = behind + 1 + ahead;
			hardfault = true;
			goto found;
		case FAULT_RESTART:
			goto RetryFault;
		case FAULT_SUCCESS:
			return (KERN_SUCCESS);
		case FAULT_FAILURE:
			return (KERN_FAILURE);
		case FAULT_OUT_OF_BOUNDS:
			return (KERN_OUT_OF_BOUNDS);
		case FAULT_PROTECTION_FAILURE:
			return (KERN_PROTECTION_FAILURE);
		case FAULT_CONTINUE:
			break;
		default:
			panic("vm_fault: Unhandled status %d", res);
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				fault_page_free(&fs.first_m);
			unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break;
	}

found:
	/*
	 * A valid page has been found and exclusively busied.  The
	 * object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		rv = vm_fault_relookup(&fs);
		if (rv != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (rv == KERN_RESTART)
				goto RetryFault;
			return (rv);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	VM_OBJECT_ASSERT_UNLOCKED(fs->object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call

/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new object backing dst_entry with private copy of all
 *	underlying pages.  When src_entry is equal to dst_entry, the
 *	function implements COW for a wired-down map entry.  Otherwise,
 *	it forks a wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	bool upgrade;

	upgrade = src_entry == dst_entry;
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	access = prot = dst_entry->protection;
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;

		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}

	VM_OBJECT_WLOCK(dst_object);
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    (dst_object->flags & OBJ_SWAP) != 0) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);

			/*
			 * The object lock does not guarantee that "src_m" will
			 * transition from invalid to valid, but it does ensure
			 * that "src_m" will not transition from valid to
			 * invalid.
			 */
			dst_m->dirty = dst_m->valid = src_m->valid;
			VM_OBJECT_RUNLOCK(object);
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  The index can fall
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into the pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			VM_OBJECT_WUNLOCK(dst_object);
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
			VM_OBJECT_WLOCK(dst_object);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
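
/*
 * Usage sketch, modeled on the kernel's copyin_nofault() (the function name
 * below is hypothetical and the body is illustrative rather than part of
 * this file): a caller that must not sleep in the fault handler brackets its
 * access to user memory with the pair of functions above, restoring the
 * previous state afterwards.
 */
static __unused int
vm_fault_nofault_copyin_sketch(const void *uaddr, void *kaddr, size_t len)
{
	int error, save;

	save = vm_fault_disable_pagefaults();
	/* With TDP_NOFAULTING set, copyin() fails instead of paging in. */
	error = copyin(uaddr, kaddr, len);
	vm_fault_enable_pagefaults(save);
	return (error);
}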