/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);
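
/*
 * The ENTRY_CHARGED() macro below reports whether swap accounting for
 * an entry's range has already been charged to a credential: either
 * directly on the entry, or on its backing object when the entry no
 * longer needs its own copy-on-write copy of that object.
 */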
#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */
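
/*
 * Note (informational): the zone init functions above run when an item
 * is first imported into a zone, not on every uma_zalloc().  Together
 * with UMA_ZONE_NOFREE this makes vmspaces and maps type-stable: the
 * embedded mutex and sx lock stay valid across free/alloc cycles, which
 * is also what allows PROC_VMSPACE_{UN,}LOCK() to be a noop.
 */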

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	if (pinit == NULL)
		pinit = &pmap_pinit;
	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */
struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The new vmspace is either the vmspace of a user process obtained
 * from an active AIO request or the initial vmspace of the AIO kernel
 * process (when it is idling).  Because user processes will block to
 * drain any active AIO requests before proceeding in exit() or
 * execve(), the reference count for vmspaces from AIO requests can
 * never be 0.  Similarly, AIO kernel processes hold an extra
 * reference on their initial vmspace for the life of the process.  As
 * a result, the 'newvm' vmspace always has a non-zero reference
 * count.  This permits an additional reference on 'newvm' to be
 * acquired via a simple atomic increment rather than the loop in
 * vmspace_acquire_ref() above.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	vmspace_free(oldvm);
}
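
/*
 * Usage sketch (illustrative only, not part of the build): a consumer
 * that must inspect another process's address space pairs
 * vmspace_acquire_ref() with vmspace_free():
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	... examine vm->vm_map ...
 *	vmspace_free(vm);
 */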

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object, object1;
	struct vnode *vp;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	VM_OBJECT_RLOCK(object);
	while ((object1 = object->backing_object) != NULL) {
		VM_OBJECT_RLOCK(object1);
		VM_OBJECT_RUNLOCK(object);
		object = object1;
	}

	vp = NULL;
	if (object->type == OBJT_DEAD) {
		/*
		 * For OBJT_DEAD objects, v_writecount was handled in
		 * vnode_pager_dealloc().
		 */
	} else if (object->type == OBJT_VNODE) {
		vp = object->handle;
	} else if (object->type == OBJT_SWAP) {
		KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
		    ("vm_map_entry_set_vnode_text: swap and !TMPFS "
		    "entry %p, object %p, add %d", entry, object, add));
		/*
		 * Tmpfs VREG node, which was reclaimed, has
		 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
		 * this case there is no v_writecount to adjust.
		 */
		if ((object->flags & OBJ_TMPFS) != 0)
			vp = object->un_pager.swp.swp_tmpfs;
	} else {
		KASSERT(0,
		    ("vm_map_entry_set_vnode_text: wrong object type, "
		    "entry %p, object %p, add %d", entry, object, add));
	}
	if (vp != NULL) {
		if (add) {
			VOP_SET_TEXT_CHECKED(vp);
			VM_OBJECT_RUNLOCK(object);
		} else {
			vhold(vp);
			VM_OBJECT_RUNLOCK(object);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			VOP_UNSET_TEXT_CHECKED(vp);
			VOP_UNLOCK(vp, 0);
			vdrop(vp);
		}
	} else {
		VM_OBJECT_RUNLOCK(object);
	}
}
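
/*
 * Informational note: entries queued on td_map_def_user are drained by
 * vm_map_process_deferred() only after the map lock has been dropped in
 * _vm_map_unlock(), so sleepable operations such as the vn_lock() call
 * in vm_map_entry_set_vnode_text() never run with the map locked.
 */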
static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vm_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_set_vnode_text(entry, false);
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)

enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
#ifdef DIAGNOSTIC
static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
#else
static int enable_vmmap_check = VMMAP_CHECK_NONE;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");

static void _vm_map_assert_consistent(vm_map_t map, int check);

#define	VM_MAP_ASSERT_CONSISTENT(map) \
    _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
#ifdef DIAGNOSTIC
#define	VM_MAP_UNLOCK_CONSISTENT(map) do {			\
	if (map->nupdates > map->nentries) {			\
		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \
		map->nupdates = 0;				\
	}							\
} while (0)
#else
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define	VM_MAP_ASSERT_CONSISTENT(map)
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif /* INVARIANTS */

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}
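
/*
 * Usage sketch (illustrative only): the vm_map_lock() family of macros
 * in vm_map.h wraps the functions above, supplying LOCK_FILE/LOCK_LINE.
 * A typical update is bracketed as:
 *
 *	vm_map_lock(map);
 *	... modify entries; map->timestamp was bumped by the lock ...
 *	vm_map_unlock(map);	// drains deferred entries for user maps
 */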

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		VM_MAP_UNLOCK_CONSISTENT(map);
		sx_downgrade_(&map->lock, file, line);
	}
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}
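
/*
 * Usage sketch (illustrative only): a waiter re-locks and re-validates
 * after waking, since the map may have changed while unlocked:
 *
 *	vm_map_lock(map);
 *	while (!condition(map)) {
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);
 *	}
 *	...
 *	vm_map_unlock(map);
 *
 * The waker changes the map state under the lock and then calls
 * vm_map_wakeup(map).
 */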

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->header.eflags = MAP_ENTRY_HEADER;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
	map->anon_loc = 0;
#ifdef DIAGNOSTIC
	map->nupdates = 0;
#endif
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_max_free_{left,right}:
 *
 *	Compute the size of the largest free gap between two entries,
 *	one the root of a tree and the other the ancestor of that root
 *	that is the least or greatest ancestor found on the search path.
 */
static inline vm_size_t
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{

	return (root->left != NULL ?
	    root->left->max_free : root->start - left_ancestor->end);
}

static inline vm_size_t
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{

	return (root->right != NULL ?
	    root->right->max_free : right_ancestor->start - root->end);
}

#define	SPLAY_LEFT_STEP(root, y, rlist, test) do {			\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->right->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look right to find it.				\
	 */								\
	y = root->left;							\
	max_free = root->max_free;					\
	KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free invariant fails", __func__));		\
	if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free)	\
		max_free = vm_map_entry_max_free_right(root, rlist);	\
	if (y != NULL && (test)) {					\
		/* Rotate right and make y root. */			\
		root->left = y->right;					\
		y->right = root;					\
		if (max_free < y->max_free)				\
			root->max_free = max_free = MAX(max_free,	\
			    vm_map_entry_max_free_left(root, y));	\
		root = y;						\
		y = root->left;						\
	}								\
	/* Copy right->max_free.  Put root on rlist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free not copied from right", __func__));		\
	root->left = rlist;						\
	rlist = root;							\
	root = y;							\
} while (0)

#define	SPLAY_RIGHT_STEP(root, y, llist, test) do {			\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->left->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look left to find it.				\
	 */								\
	y = root->right;						\
	max_free = root->max_free;					\
	KASSERT(max_free >= vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free invariant fails", __func__));		\
	if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free)	\
		max_free = vm_map_entry_max_free_left(root, llist);	\
	if (y != NULL && (test)) {					\
		/* Rotate left and make y root. */			\
		root->right = y->left;					\
		y->left = root;						\
		if (max_free < y->max_free)				\
			root->max_free = max_free = MAX(max_free,	\
			    vm_map_entry_max_free_right(root, y));	\
		root = y;						\
		y = root->right;					\
	}								\
	/* Copy left->max_free.  Put root on llist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free not copied from left", __func__));		\
	root->right = llist;						\
	llist = root;							\
	root = y;							\
} while (0)
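
/*
 * Worked example of the max_free bookkeeping (illustrative only).
 * Suppose a map spanning [0x1000, 0x9000) holds three entries,
 * E1 = [0x1000, 0x2000), E2 = [0x3000, 0x4000) and E3 = [0x8000, 0x9000),
 * with E2 at the root, E1 its left child and E3 its right child.  The
 * free gaps are 0x1000 bytes (E1..E2) and 0x4000 bytes (E2..E3), so:
 *
 *	E1->max_free == 0x1000	(gap between E1 and E2)
 *	E3->max_free == 0x4000	(gap between E2 and E3)
 *	E2->max_free == 0x4000	(maximum over the whole tree)
 *
 * A search for a gap larger than E2->max_free can fail immediately,
 * without visiting any children; vm_map_splay_split() below exploits
 * this by treating subtrees with max_free < length as absent.
 */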

/*
 * Walk down the tree until we find addr or a NULL pointer where addr would go,
 * breaking off left and right subtrees of nodes less than, or greater than
 * addr.  Treat pointers to nodes with max_free < length as NULL pointers.
 * llist and rlist are the two sides in reverse order (bottom-up), with llist
 * linked by the right pointer and rlist linked by the left pointer in the
 * vm_map_entry, and both lists terminated by &map->header.  This function, and
 * the subsequent call to vm_map_splay_merge, rely on the start and end address
 * values in &map->header.
 */
static vm_map_entry_t
vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
    vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
{
	vm_map_entry_t llist, rlist, root, y;

	llist = rlist = &map->header;
	root = map->root;
	while (root != NULL && root->max_free >= length) {
		KASSERT(llist->end <= root->start && root->end <= rlist->start,
		    ("%s: root not within tree bounds", __func__));
		if (addr < root->start) {
			SPLAY_LEFT_STEP(root, y, rlist,
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
			SPLAY_RIGHT_STEP(root, y, llist,
			    y->max_free >= length && addr >= y->end);
		} else
			break;
	}
	*out_llist = llist;
	*out_rlist = rlist;
	return (root);
}

static void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t rlist, y;

	root = root->right;
	rlist = *iolist;
	while (root != NULL)
		SPLAY_LEFT_STEP(root, y, rlist, true);
	*iolist = rlist;
}

static void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t llist, y;

	root = root->left;
	llist = *iolist;
	while (root != NULL)
		SPLAY_RIGHT_STEP(root, y, llist, true);
	*iolist = llist;
}

static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
	vm_map_entry_t tmp;

	tmp = *b;
	*b = *a;
	*a = tmp;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static void
vm_map_splay_merge(vm_map_t map, vm_map_entry_t root,
    vm_map_entry_t llist, vm_map_entry_t rlist)
{
	vm_map_entry_t prev;
	vm_size_t max_free_left, max_free_right;

	max_free_left = vm_map_entry_max_free_left(root, llist);
	if (llist != &map->header) {
		prev = root->left;
		do {
			/*
			 * The max_free values of the children of llist are in
			 * llist->max_free and max_free_left.  Update with the
			 * max value.
			 */
			llist->max_free = max_free_left =
			    MAX(llist->max_free, max_free_left);
			vm_map_entry_swap(&llist->right, &prev);
			vm_map_entry_swap(&prev, &llist);
		} while (llist != &map->header);
		root->left = prev;
	}
	max_free_right = vm_map_entry_max_free_right(root, rlist);
	if (rlist != &map->header) {
		prev = root->right;
		do {
			/*
			 * The max_free values of the children of rlist are in
			 * rlist->max_free and max_free_right.  Update with the
			 * max value.
			 */
			rlist->max_free = max_free_right =
			    MAX(rlist->max_free, max_free_right);
			vm_map_entry_swap(&rlist->left, &prev);
			vm_map_entry_swap(&prev, &rlist);
		} while (rlist != &map->header);
		root->right = prev;
	}
	root->max_free = MAX(max_free_left, max_free_right);
	map->root = root;
#ifdef DIAGNOSTIC
	++map->nupdates;
#endif
}

/*
 *	vm_map_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower if possible) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t llist, rlist, root;

	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
	if (root != NULL) {
		/* do nothing */
	} else if (llist != &map->header) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		root->right = NULL;
	} else if (rlist != &map->header) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		/* There is no root. */
		return (NULL);
	}
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t llist, rlist, root;

	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root == NULL,
	    ("vm_map_entry_link: link object already mapped"));
	entry->prev = llist;
	entry->next = rlist;
	llist->next = rlist->prev = entry;
	entry->left = entry->right = NULL;
	vm_map_splay_merge(map, entry, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
}

enum unlink_merge_type {
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};

static void
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
    enum unlink_merge_type op)
{
	vm_map_entry_t llist, rlist, root, y;

	VM_MAP_ASSERT_LOCKED(map);
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	vm_map_splay_findnext(root, &rlist);
	switch (op) {
	case UNLINK_MERGE_NEXT:
		rlist->start = root->start;
		rlist->offset = root->offset;
		y = root->left;
		root = rlist;
		rlist = root->left;
		root->left = y;
		break;
	case UNLINK_MERGE_NONE:
		vm_map_splay_findprev(root, &llist);
		if (llist != &map->header) {
			root = llist;
			llist = root->right;
			root->right = NULL;
		} else if (rlist != &map->header) {
			root = rlist;
			rlist = root->left;
			root->left = NULL;
		} else
			root = NULL;
		break;
	}
	y = entry->next;
	y->prev = entry->prev;
	y->prev->next = y;
	if (root != NULL)
		vm_map_splay_merge(map, root, llist, rlist);
	else
		map->root = NULL;
	VM_MAP_ASSERT_CONSISTENT(map);
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize:
 *
 *	Resize a vm_map_entry, recompute the amount of free space that
 *	follows it and propagate that value up the tree.
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
	vm_map_entry_t llist, rlist, root;

	VM_MAP_ASSERT_LOCKED(map);
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("%s: resize object not mapped", __func__));
	vm_map_splay_findnext(root, &rlist);
	root->right = NULL;
	entry->end += grow_amount;
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
	    __func__, map, map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur, lbound;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL) {
		*entry = &map->header;
		return (FALSE);
	}
	if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	}
	if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		cur = vm_map_splay(map, address);
		if (!locked) {
			VM_MAP_UNLOCK_CONSISTENT(map);
			sx_downgrade(&map->lock);
		}

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address < cur->start) {
			*entry = &map->header;
			return (FALSE);
		}
		*entry = cur;
		return (address < cur->end);
	}
	/*
	 * Since the map is only locked for read access, perform a
	 * standard binary search tree lookup for "address".
	 */
	lbound = &map->header;
	do {
		if (address < cur->start) {
			cur = cur->left;
		} else if (cur->end <= address) {
			lbound = cur;
			cur = cur->right;
		} else {
			*entry = cur;
			return (TRUE);
		}
	} while (cur != NULL);
	*entry = lbound;
	return (FALSE);
}
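
/*
 * Usage sketch (illustrative only): find the entry covering "addr",
 * if any, under a read lock:
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... [entry->start, entry->end) contains addr ...
 *	} else {
 *		... "entry" is the predecessor of addr, or
 *		    &map->header if addr precedes every entry ...
 *	}
 *	vm_map_unlock_read(map);
 */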

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) ||
	    start >= end)
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (prev_entry->next->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_WRITECOUNT)
		protoeflags |= MAP_ENTRY_WRITECNT;
	if (cow & MAP_VN_EXEC)
		protoeflags |= MAP_ENTRY_VN_EXEC;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
	    protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
	    MAP_VN_EXEC)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			vm_map_entry_resize(map, prev_entry,
			    end - prev_entry->end);
			vm_map_try_merge_entries(map, prev_entry, prev_entry->next);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_try_merge_entries(map, prev_entry, new_entry);
	vm_map_try_merge_entries(map, new_entry, new_entry->next);

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
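
/*
 * Usage sketch (illustrative only): map one page of "object" at a fixed
 * range.  The caller holds the map lock and has already referenced the
 * object to account for the new entry:
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, 0, start, start + PAGE_SIZE,
 *	    VM_PROT_RW, VM_PROT_RW, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */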

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "max_free" is the maximum amount of
 *	contiguous free space between an entry in its subtree and a
 *	neighbor of that entry.  This allows finding a free region in
 *	one path down the tree, so O(log n) amortized with splay
 *	trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: starting address if sufficient space,
 *		 vm_map_max(map)-length+1 if insufficient space.
 */
vm_offset_t
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
{
	vm_map_entry_t llist, rlist, root, y;
	vm_size_t left_length;
	vm_offset_t gap_end;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	start = MAX(start, vm_map_min(map));
	if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
		return (vm_map_max(map) - length + 1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL)
		return (start);

	/*
	 * After splay_split, if start is within an entry, push it to the start
	 * of the following gap.  If rlist is at the end of the gap containing
	 * start, save the end of that gap in gap_end to see if the gap is big
	 * enough; otherwise set gap_end to start, to skip gap-checking and
	 * move directly to a search of the right subtree.
	 */
	root = vm_map_splay_split(map, start, length, &llist, &rlist);
	gap_end = rlist->start;
	if (root != NULL) {
		start = root->end;
		if (root->right != NULL)
			gap_end = start;
	} else if (rlist != &map->header) {
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		root = llist;
		llist = root->right;
		root->right = NULL;
	}
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	if (length <= gap_end - start)
		return (start);

	/* With max_free, can immediately tell if no solution. */
	if (root->right == NULL || length > root->right->max_free)
		return (vm_map_max(map) - length + 1);

	/*
	 * Splay for the least large-enough gap in the right subtree.
	 */
	llist = rlist = &map->header;
	for (left_length = 0;;
	    left_length = vm_map_entry_max_free_left(root, llist)) {
		if (length <= left_length)
			SPLAY_LEFT_STEP(root, y, rlist,
			    length <= vm_map_entry_max_free_left(y, llist));
		else
			SPLAY_RIGHT_STEP(root, y, llist,
			    length > vm_map_entry_max_free_left(y, root));
		if (root == NULL)
			break;
	}
	root = llist;
	llist = root->right;
	root->right = NULL;
	if (rlist != &map->header) {
		y = rlist;
		rlist = y->left;
		y->left = NULL;
		vm_map_splay_merge(map, y, &map->header, rlist);
		y->max_free = MAX(
		    vm_map_entry_max_free_left(y, root),
		    vm_map_entry_max_free_right(y, &map->header));
		root->right = y;
	}
	vm_map_splay_merge(map, root, llist, &map->header);
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root->end);
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}
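
/*
 * The tables below size the ASLR randomization window in units of
 * pagesizes[pidx] pages: index 0 is used for regular allocations and
 * index 1 when superpage alignment is requested, on 64-bit and 32-bit
 * address spaces respectively (see the gap computation in vm_map_find()).
 */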
static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
static const int aslr_pages_rnd_32[2] = {0x100, 0x4};

static int cluster_anon = 1;
SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
    &cluster_anon, 0,
    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");

static bool
clustering_anon_allowed(vm_offset_t addr)
{

	switch (cluster_anon) {
	case 0:
		return (false);
	case 1:
		return (addr == 0);
	case 2:
	default:
		return (true);
	}
}

static long aslr_restarts;
SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
    &aslr_restarts, 0,
    "Number of aslr failures");

#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)

/*
 * Searches for the specified amount of free space in the given map with the
 * specified alignment.  Performs an address-ordered, first-fit search from
 * the given address "*addr", with an optional upper bound "max_addr".  If the
 * parameter "alignment" is zero, then the alignment is computed from the
 * given (object, offset) pair so as to enable the greatest possible use of
 * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 *
 * The map must be locked.  Initially, there must be at least "length" bytes
 * of free space at the given address.
 */
static int
vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
    vm_offset_t alignment)
{
	vm_offset_t aligned_addr, free_addr;

	VM_MAP_ASSERT_LOCKED(map);
	free_addr = *addr;
	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
	    ("caller failed to provide space %#jx at address %p",
	    (uintmax_t)length, (void *)free_addr));
	for (;;) {
		/*
		 * At the start of every iteration, the free space at address
		 * "*addr" is at least "length" bytes.
		 */
		if (alignment == 0)
			pmap_align_superpage(object, offset, addr, length);
		else if ((*addr & (alignment - 1)) != 0) {
			*addr &= ~(alignment - 1);
			*addr += alignment;
		}
		aligned_addr = *addr;
		if (aligned_addr == free_addr) {
			/*
			 * Alignment did not change "*addr", so "*addr" must
			 * still provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}

		/*
		 * Test for address wrap on "*addr".  A wrapped "*addr" could
		 * be a valid address, in which case vm_map_findspace() cannot
		 * be relied upon to fail.
		 */
		if (aligned_addr < free_addr)
			return (KERN_NO_SPACE);
		*addr = vm_map_findspace(map, aligned_addr, length);
		if (*addr + length > vm_map_max(map) ||
		    (max_addr != 0 && *addr + length > max_addr))
			return (KERN_NO_SPACE);
		free_addr = *addr;
		if (free_addr == aligned_addr) {
			/*
			 * If a successful call to vm_map_findspace() did not
			 * change "*addr", then "*addr" must still be aligned
			 * and provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}
	}
}
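
/*
 * Example (illustrative only): a caller requesting 64KB alignment
 * passes find_space = VMFS_ALIGNED_SPACE(16), i.e. 16 << 8, which
 * vm_map_find() below decodes as
 *
 *	alignment = (vm_offset_t)1 << (find_space >> 8);	// 1 << 16
 *
 * before handing the search off to vm_map_alignspace().
 */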

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, vm_offset_t max_addr, int find_space,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, curr_min_addr, min_addr;
	int gap, pidx, rv, try;
	bool cluster, en_aslr, update_anon;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	en_aslr = (map->flags & MAP_ASLR) != 0;
	update_anon = cluster = clustering_anon_allowed(*addr) &&
	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
	    find_space != VMFS_NO_SPACE && object == NULL &&
	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
	curr_min_addr = min_addr = *addr;
	if (en_aslr && min_addr == 0 && !cluster &&
	    find_space != VMFS_NO_SPACE &&
	    (map->flags & MAP_ASLR_IGNSTART) != 0)
		curr_min_addr = min_addr = vm_map_min(map);
	try = 0;
	vm_map_lock(map);
	if (cluster) {
		curr_min_addr = map->anon_loc;
		if (curr_min_addr == 0)
			cluster = false;
	}
	if (find_space != VMFS_NO_SPACE) {
		KASSERT(find_space == VMFS_ANY_SPACE ||
		    find_space == VMFS_OPTIMAL_SPACE ||
		    find_space == VMFS_SUPER_SPACE ||
		    alignment != 0, ("unexpected VMFS flag"));
again:
		/*
		 * When creating an anonymous mapping, try clustering
		 * with an existing anonymous mapping first.
		 *
		 * We make up to two attempts to find address space
		 * for a given find_space value.  The first attempt may
		 * apply randomization or may cluster with an existing
		 * anonymous mapping.  If this first attempt fails,
		 * perform a first-fit search of the available address
		 * space.
		 *
		 * If all tries failed, and find_space is
		 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE.
		 * Again enable clustering and randomization.
		 */
		try++;
		MPASS(try <= 2);

		if (try == 2) {
			/*
			 * Second try: we failed either to find a
			 * suitable region for randomizing the
			 * allocation, or to cluster with an existing
			 * mapping.  Retry with free run.
			 */
			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
			    vm_map_min(map) : min_addr;
			atomic_add_long(&aslr_restarts, 1);
		}

		if (try == 1 && en_aslr && !cluster) {
			/*
			 * Find space for allocation, including
			 * gap needed for later randomization.
			 */
			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
			    (find_space == VMFS_SUPER_SPACE || find_space ==
			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
			*addr = vm_map_findspace(map, curr_min_addr,
			    length + gap * pagesizes[pidx]);
			if (*addr + length + gap * pagesizes[pidx] >
			    vm_map_max(map))
				goto again;
			/* And randomize the start address. */
*/ 1930 *addr += (arc4random() % gap) * pagesizes[pidx]; 1931 if (max_addr != 0 && *addr + length > max_addr) 1932 goto again; 1933 } else { 1934 *addr = vm_map_findspace(map, curr_min_addr, length); 1935 if (*addr + length > vm_map_max(map) || 1936 (max_addr != 0 && *addr + length > max_addr)) { 1937 if (cluster) { 1938 cluster = false; 1939 MPASS(try == 1); 1940 goto again; 1941 } 1942 rv = KERN_NO_SPACE; 1943 goto done; 1944 } 1945 } 1946 1947 if (find_space != VMFS_ANY_SPACE && 1948 (rv = vm_map_alignspace(map, object, offset, addr, length, 1949 max_addr, alignment)) != KERN_SUCCESS) { 1950 if (find_space == VMFS_OPTIMAL_SPACE) { 1951 find_space = VMFS_ANY_SPACE; 1952 curr_min_addr = min_addr; 1953 cluster = update_anon; 1954 try = 0; 1955 goto again; 1956 } 1957 goto done; 1958 } 1959 } else if ((cow & MAP_REMAP) != 0) { 1960 if (*addr < vm_map_min(map) || 1961 *addr + length > vm_map_max(map) || 1962 *addr + length <= length) { 1963 rv = KERN_INVALID_ADDRESS; 1964 goto done; 1965 } 1966 vm_map_delete(map, *addr, *addr + length); 1967 } 1968 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1969 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 1970 max, cow); 1971 } else { 1972 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 1973 prot, max, cow); 1974 } 1975 if (rv == KERN_SUCCESS && update_anon) 1976 map->anon_loc = *addr + length; 1977 done: 1978 vm_map_unlock(map); 1979 return (rv); 1980 } 1981 1982 /* 1983 * vm_map_find_min() is a variant of vm_map_find() that takes an 1984 * additional parameter (min_addr) and treats the given address 1985 * (*addr) differently. Specifically, it treats *addr as a hint 1986 * and not as the minimum address where the mapping is created. 1987 * 1988 * This function works in two phases. First, it tries to 1989 * allocate above the hint. If that fails and the hint is 1990 * greater than min_addr, it performs a second pass, replacing 1991 * the hint with min_addr as the minimum address for the 1992 * allocation. 1993 */ 1994 int 1995 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1996 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr, 1997 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 1998 int cow) 1999 { 2000 vm_offset_t hint; 2001 int rv; 2002 2003 hint = *addr; 2004 for (;;) { 2005 rv = vm_map_find(map, object, offset, addr, length, max_addr, 2006 find_space, prot, max, cow); 2007 if (rv == KERN_SUCCESS || min_addr >= hint) 2008 return (rv); 2009 *addr = hint = min_addr; 2010 } 2011 } 2012 2013 /* 2014 * A map entry with any of the following flags set must not be merged with 2015 * another entry. 
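 *
 * (Illustrative case, not from the source: two adjacent anonymous
 * entries that agree in object, offset, protection, and cred still
 * must not be merged while either is marked MAP_ENTRY_IN_TRANSITION,
 * since a wiring operation may hold a pointer to that entry across a
 * map unlock.)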
2016 */
2017 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2018 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
2019 
2020 static bool
2021 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2022 {
2023 
2024 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2025 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2026 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2027 prev, entry));
2028 return (prev->end == entry->start &&
2029 prev->object.vm_object == entry->object.vm_object &&
2030 (prev->object.vm_object == NULL ||
2031 prev->offset + (prev->end - prev->start) == entry->offset) &&
2032 prev->eflags == entry->eflags &&
2033 prev->protection == entry->protection &&
2034 prev->max_protection == entry->max_protection &&
2035 prev->inheritance == entry->inheritance &&
2036 prev->wired_count == entry->wired_count &&
2037 prev->cred == entry->cred);
2038 }
2039 
2040 static void
2041 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2042 {
2043 
2044 /*
2045 * If the backing object is a vnode object, vm_object_deallocate()
2046 * calls vrele(). However, vrele() does not lock the vnode because
2047 * the vnode has additional references. Thus, the map lock can be
2048 * kept without causing a lock-order reversal with the vnode lock.
2049 *
2050 * Since we count the number of virtual page mappings in
2051 * object->un_pager.vnp.writemappings, the writemappings value
2052 * should not be adjusted when the entry is disposed of.
2053 */
2054 if (entry->object.vm_object != NULL)
2055 vm_object_deallocate(entry->object.vm_object);
2056 if (entry->cred != NULL)
2057 crfree(entry->cred);
2058 vm_map_entry_dispose(map, entry);
2059 }
2060 
2061 /*
2062 * vm_map_try_merge_entries:
2063 *
2064 * Compare the given map entry to its predecessor, and merge its predecessor
2065 * into it if possible. The entry remains valid, and may be extended.
2066 * The predecessor may be deleted.
2067 *
2068 * The map must be locked.
2069 */
2070 void
2071 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry)
2072 {
2073 
2074 VM_MAP_ASSERT_LOCKED(map);
2075 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2076 vm_map_mergeable_neighbors(prev, entry)) {
2077 vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
2078 vm_map_merged_neighbor_dispose(map, prev);
2079 }
2080 }
2081 
2082 /*
2083 * vm_map_entry_back:
2084 *
2085 * Allocate an object to back a map entry.
2086 */
2087 static inline void
2088 vm_map_entry_back(vm_map_entry_t entry)
2089 {
2090 vm_object_t object;
2091 
2092 KASSERT(entry->object.vm_object == NULL,
2093 ("map entry %p has backing object", entry));
2094 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2095 ("map entry %p is a submap", entry));
2096 object = vm_object_allocate(OBJT_DEFAULT,
2097 atop(entry->end - entry->start));
2098 entry->object.vm_object = object;
2099 entry->offset = 0;
2100 if (entry->cred != NULL) {
2101 object->cred = entry->cred;
2102 object->charge = entry->end - entry->start;
2103 entry->cred = NULL;
2104 }
2105 }
2106 
2107 /*
2108 * vm_map_entry_charge_object
2109 *
2110 * If there is no object backing this entry, create one. Otherwise, if
2111 * the entry has cred, give it to the backing object.
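 *
 * A condensed sketch of the hand-off performed below (illustrative
 * only): the swap reservation recorded on the entry moves to the
 * object so that the charge is accounted exactly once:
 *
 *	object->cred = entry->cred;
 *	object->charge = entry->end - entry->start;
 *	entry->cred = NULL;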
2112 */ 2113 static inline void 2114 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) 2115 { 2116 2117 VM_MAP_ASSERT_LOCKED(map); 2118 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2119 ("map entry %p is a submap", entry)); 2120 if (entry->object.vm_object == NULL && !map->system_map && 2121 (entry->eflags & MAP_ENTRY_GUARD) == 0) 2122 vm_map_entry_back(entry); 2123 else if (entry->object.vm_object != NULL && 2124 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2125 entry->cred != NULL) { 2126 VM_OBJECT_WLOCK(entry->object.vm_object); 2127 KASSERT(entry->object.vm_object->cred == NULL, 2128 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); 2129 entry->object.vm_object->cred = entry->cred; 2130 entry->object.vm_object->charge = entry->end - entry->start; 2131 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2132 entry->cred = NULL; 2133 } 2134 } 2135 2136 /* 2137 * vm_map_clip_start: [ internal use only ] 2138 * 2139 * Asserts that the given entry begins at or after 2140 * the specified address; if necessary, 2141 * it splits the entry into two. 2142 */ 2143 #define vm_map_clip_start(map, entry, startaddr) \ 2144 { \ 2145 if (startaddr > entry->start) \ 2146 _vm_map_clip_start(map, entry, startaddr); \ 2147 } 2148 2149 /* 2150 * This routine is called only when it is known that 2151 * the entry must be split. 2152 */ 2153 static void 2154 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 2155 { 2156 vm_map_entry_t new_entry; 2157 2158 VM_MAP_ASSERT_LOCKED(map); 2159 KASSERT(entry->end > start && entry->start < start, 2160 ("_vm_map_clip_start: invalid clip of entry %p", entry)); 2161 2162 /* 2163 * Create a backing object now, if none exists, so that more individual 2164 * objects won't be created after the map entry is split. 2165 */ 2166 vm_map_entry_charge_object(map, entry); 2167 2168 /* Clone the entry. */ 2169 new_entry = vm_map_entry_create(map); 2170 *new_entry = *entry; 2171 2172 /* 2173 * Split off the front portion. Insert the new entry BEFORE this one, 2174 * so that this entry has the specified starting address. 2175 */ 2176 new_entry->end = start; 2177 entry->offset += (start - entry->start); 2178 entry->start = start; 2179 if (new_entry->cred != NULL) 2180 crhold(entry->cred); 2181 2182 vm_map_entry_link(map, new_entry); 2183 2184 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2185 vm_object_reference(new_entry->object.vm_object); 2186 vm_map_entry_set_vnode_text(new_entry, true); 2187 /* 2188 * The object->un_pager.vnp.writemappings for the 2189 * object of MAP_ENTRY_WRITECNT type entry shall be 2190 * kept as is here. The virtual pages are 2191 * re-distributed among the clipped entries, so the sum is 2192 * left the same. 2193 */ 2194 } 2195 } 2196 2197 /* 2198 * vm_map_clip_end: [ internal use only ] 2199 * 2200 * Asserts that the given entry ends at or before 2201 * the specified address; if necessary, 2202 * it splits the entry into two. 2203 */ 2204 #define vm_map_clip_end(map, entry, endaddr) \ 2205 { \ 2206 if ((endaddr) < (entry->end)) \ 2207 _vm_map_clip_end((map), (entry), (endaddr)); \ 2208 } 2209 2210 /* 2211 * This routine is called only when it is known that 2212 * the entry must be split. 
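 *
 * (Worked example with illustrative addresses: clipping the entry
 * [0x1000, 0x5000) at end == 0x3000 shrinks this entry to
 * [0x1000, 0x3000) and inserts a new entry [0x3000, 0x5000) whose
 * offset is advanced by 0x2000, so the two halves together still map
 * the same object pages.)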
2213 */
2214 static void
2215 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
2216 {
2217 vm_map_entry_t new_entry;
2218 
2219 VM_MAP_ASSERT_LOCKED(map);
2220 KASSERT(entry->start < end && entry->end > end,
2221 ("_vm_map_clip_end: invalid clip of entry %p", entry));
2222 
2223 /*
2224 * Create a backing object now, if none exists, so that more individual
2225 * objects won't be created after the map entry is split.
2226 */
2227 vm_map_entry_charge_object(map, entry);
2228 
2229 /* Clone the entry. */
2230 new_entry = vm_map_entry_create(map);
2231 *new_entry = *entry;
2232 
2233 /*
2234 * Split off the back portion. Insert the new entry AFTER this one,
2235 * so that this entry has the specified ending address.
2236 */
2237 new_entry->start = entry->end = end;
2238 new_entry->offset += (end - entry->start);
2239 if (new_entry->cred != NULL)
2240 crhold(entry->cred);
2241 
2242 vm_map_entry_link(map, new_entry);
2243 
2244 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2245 vm_object_reference(new_entry->object.vm_object);
2246 vm_map_entry_set_vnode_text(new_entry, true);
2247 }
2248 }
2249 
2250 /*
2251 * vm_map_submap: [ kernel use only ]
2252 *
2253 * Mark the given range as handled by a subordinate map.
2254 *
2255 * This range must have been created with vm_map_find,
2256 * and no other operations may have been performed on this
2257 * range prior to calling vm_map_submap.
2258 *
2259 * Only a limited number of operations can be performed
2260 * within this range after calling vm_map_submap:
2261 * vm_fault
2262 * [Don't try vm_map_copy!]
2263 *
2264 * To remove a submapping, one must first remove the
2265 * range from the superior map, and then destroy the
2266 * submap (if desired). [Better yet, don't try it.]
2267 */
2268 int
2269 vm_map_submap(
2270 vm_map_t map,
2271 vm_offset_t start,
2272 vm_offset_t end,
2273 vm_map_t submap)
2274 {
2275 vm_map_entry_t entry;
2276 int result;
2277 
2278 result = KERN_INVALID_ARGUMENT;
2279 
2280 vm_map_lock(submap);
2281 submap->flags |= MAP_IS_SUB_MAP;
2282 vm_map_unlock(submap);
2283 
2284 vm_map_lock(map);
2285 
2286 VM_MAP_RANGE_CHECK(map, start, end);
2287 
2288 if (vm_map_lookup_entry(map, start, &entry)) {
2289 vm_map_clip_start(map, entry, start);
2290 } else
2291 entry = entry->next;
2292 
2293 vm_map_clip_end(map, entry, end);
2294 
2295 if ((entry->start == start) && (entry->end == end) &&
2296 ((entry->eflags & MAP_ENTRY_COW) == 0) &&
2297 (entry->object.vm_object == NULL)) {
2298 entry->object.sub_map = submap;
2299 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2300 result = KERN_SUCCESS;
2301 }
2302 vm_map_unlock(map);
2303 
2304 if (result != KERN_SUCCESS) {
2305 vm_map_lock(submap);
2306 submap->flags &= ~MAP_IS_SUB_MAP;
2307 vm_map_unlock(submap);
2308 }
2309 return (result);
2310 }
2311 
2312 /*
2313 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2314 */
2315 #define MAX_INIT_PT 96
2316 
2317 /*
2318 * vm_map_pmap_enter:
2319 *
2320 * Preload the specified map's pmap with mappings to the specified
2321 * object's memory-resident pages. No further physical pages are
2322 * allocated, and no further virtual pages are retrieved from secondary
2323 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a
2324 * limited number of page mappings are created at the low-end of the
2325 * specified address range. (For this purpose, a superpage mapping
2326 * counts as one page mapping.) Otherwise, all resident pages within
2327 * the specified address range are mapped.
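 *
 * (Illustrative arithmetic, not from the source: with
 * MAP_PREFAULT_PARTIAL and 4KB pages, a fully resident 1MB range
 * yields at most MAX_INIT_PT (96) page mappings, i.e., only the first
 * 384KB is preloaded; a single resident 2MB superpage mapping would
 * count as just one of those 96.)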
2328 */ 2329 static void 2330 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2331 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2332 { 2333 vm_offset_t start; 2334 vm_page_t p, p_start; 2335 vm_pindex_t mask, psize, threshold, tmpidx; 2336 2337 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2338 return; 2339 VM_OBJECT_RLOCK(object); 2340 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2341 VM_OBJECT_RUNLOCK(object); 2342 VM_OBJECT_WLOCK(object); 2343 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2344 pmap_object_init_pt(map->pmap, addr, object, pindex, 2345 size); 2346 VM_OBJECT_WUNLOCK(object); 2347 return; 2348 } 2349 VM_OBJECT_LOCK_DOWNGRADE(object); 2350 } 2351 2352 psize = atop(size); 2353 if (psize + pindex > object->size) { 2354 if (object->size < pindex) { 2355 VM_OBJECT_RUNLOCK(object); 2356 return; 2357 } 2358 psize = object->size - pindex; 2359 } 2360 2361 start = 0; 2362 p_start = NULL; 2363 threshold = MAX_INIT_PT; 2364 2365 p = vm_page_find_least(object, pindex); 2366 /* 2367 * Assert: the variable p is either (1) the page with the 2368 * least pindex greater than or equal to the parameter pindex 2369 * or (2) NULL. 2370 */ 2371 for (; 2372 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2373 p = TAILQ_NEXT(p, listq)) { 2374 /* 2375 * don't allow an madvise to blow away our really 2376 * free pages allocating pv entries. 2377 */ 2378 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2379 vm_page_count_severe()) || 2380 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2381 tmpidx >= threshold)) { 2382 psize = tmpidx; 2383 break; 2384 } 2385 if (vm_page_all_valid(p)) { 2386 if (p_start == NULL) { 2387 start = addr + ptoa(tmpidx); 2388 p_start = p; 2389 } 2390 /* Jump ahead if a superpage mapping is possible. */ 2391 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2392 (pagesizes[p->psind] - 1)) == 0) { 2393 mask = atop(pagesizes[p->psind]) - 1; 2394 if (tmpidx + mask < psize && 2395 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2396 p += mask; 2397 threshold += mask; 2398 } 2399 } 2400 } else if (p_start != NULL) { 2401 pmap_enter_object(map->pmap, start, addr + 2402 ptoa(tmpidx), p_start, prot); 2403 p_start = NULL; 2404 } 2405 } 2406 if (p_start != NULL) 2407 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2408 p_start, prot); 2409 VM_OBJECT_RUNLOCK(object); 2410 } 2411 2412 /* 2413 * vm_map_protect: 2414 * 2415 * Sets the protection of the specified address 2416 * region in the target map. If "set_max" is 2417 * specified, the maximum protection is to be set; 2418 * otherwise, only the current protection is affected. 2419 */ 2420 int 2421 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2422 vm_prot_t new_prot, boolean_t set_max) 2423 { 2424 vm_map_entry_t current, entry, in_tran; 2425 vm_object_t obj; 2426 struct ucred *cred; 2427 vm_prot_t old_prot; 2428 int rv; 2429 2430 if (start == end) 2431 return (KERN_SUCCESS); 2432 2433 again: 2434 in_tran = NULL; 2435 vm_map_lock(map); 2436 2437 /* 2438 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2439 * need to fault pages into the map and will drop the map lock while 2440 * doing so, and the VM object may end up in an inconsistent state if we 2441 * update the protection on the map entry in between faults. 
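 *
 * (Sketch of the race being avoided: thread A, in vm_map_wire(),
 * drops the map lock to fault in pages; thread B then changes the
 * entry's protection here; A's remaining faults would wire pages
 * under the old protection while the entry already records the new
 * one.)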
2442 */ 2443 vm_map_wait_busy(map); 2444 2445 VM_MAP_RANGE_CHECK(map, start, end); 2446 2447 if (!vm_map_lookup_entry(map, start, &entry)) 2448 entry = entry->next; 2449 2450 /* 2451 * Make a first pass to check for protection violations. 2452 */ 2453 for (current = entry; current->start < end; current = current->next) { 2454 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2455 continue; 2456 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2457 vm_map_unlock(map); 2458 return (KERN_INVALID_ARGUMENT); 2459 } 2460 if ((new_prot & current->max_protection) != new_prot) { 2461 vm_map_unlock(map); 2462 return (KERN_PROTECTION_FAILURE); 2463 } 2464 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2465 in_tran = entry; 2466 } 2467 2468 /* 2469 * Postpone the operation until all in transition map entries 2470 * are stabilized. In-transition entry might already have its 2471 * pages wired and wired_count incremented, but 2472 * MAP_ENTRY_USER_WIRED flag not yet set, and visible to other 2473 * threads because the map lock is dropped. In this case we 2474 * would miss our call to vm_fault_copy_entry(). 2475 */ 2476 if (in_tran != NULL) { 2477 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2478 vm_map_unlock_and_wait(map, 0); 2479 goto again; 2480 } 2481 2482 /* 2483 * Before changing the protections, try to reserve swap space for any 2484 * private (i.e., copy-on-write) mappings that are transitioning from 2485 * read-only to read/write access. If a reservation fails, break out 2486 * of this loop early and let the next loop simplify the entries, since 2487 * some may now be mergeable. 2488 */ 2489 rv = KERN_SUCCESS; 2490 vm_map_clip_start(map, entry, start); 2491 for (current = entry; current->start < end; current = current->next) { 2492 2493 vm_map_clip_end(map, current, end); 2494 2495 if (set_max || 2496 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2497 ENTRY_CHARGED(current) || 2498 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2499 continue; 2500 } 2501 2502 cred = curthread->td_ucred; 2503 obj = current->object.vm_object; 2504 2505 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2506 if (!swap_reserve(current->end - current->start)) { 2507 rv = KERN_RESOURCE_SHORTAGE; 2508 end = current->end; 2509 break; 2510 } 2511 crhold(cred); 2512 current->cred = cred; 2513 continue; 2514 } 2515 2516 VM_OBJECT_WLOCK(obj); 2517 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2518 VM_OBJECT_WUNLOCK(obj); 2519 continue; 2520 } 2521 2522 /* 2523 * Charge for the whole object allocation now, since 2524 * we cannot distinguish between non-charged and 2525 * charged clipped mapping of the same object later. 2526 */ 2527 KASSERT(obj->charge == 0, 2528 ("vm_map_protect: object %p overcharged (entry %p)", 2529 obj, current)); 2530 if (!swap_reserve(ptoa(obj->size))) { 2531 VM_OBJECT_WUNLOCK(obj); 2532 rv = KERN_RESOURCE_SHORTAGE; 2533 end = current->end; 2534 break; 2535 } 2536 2537 crhold(cred); 2538 obj->cred = cred; 2539 obj->charge = ptoa(obj->size); 2540 VM_OBJECT_WUNLOCK(obj); 2541 } 2542 2543 /* 2544 * If enough swap space was available, go back and fix up protections. 2545 * Otherwise, just simplify entries, since some may have been modified. 2546 * [Note that clipping is not necessary the second time.] 
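 *
 * (Worked example of the set_max case below, illustrative only: with
 * old protection VM_PROT_READ | VM_PROT_WRITE and new_prot
 * VM_PROT_READ, max_protection becomes VM_PROT_READ and the effective
 * protection becomes new_prot & old_prot == VM_PROT_READ.)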
2547 */
2548 for (current = entry; current->start < end;
2549 vm_map_try_merge_entries(map, current->prev, current),
2550 current = current->next) {
2551 if (rv != KERN_SUCCESS ||
2552 (current->eflags & MAP_ENTRY_GUARD) != 0)
2553 continue;
2554 
2555 old_prot = current->protection;
2556 
2557 if (set_max)
2558 current->protection =
2559 (current->max_protection = new_prot) &
2560 old_prot;
2561 else
2562 current->protection = new_prot;
2563 
2564 /*
2565 * For user wired map entries, the normal lazy evaluation of
2566 * write access upgrades through soft page faults is
2567 * undesirable. Instead, immediately copy any pages that are
2568 * copy-on-write and enable write access in the physical map.
2569 */
2570 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2571 (current->protection & VM_PROT_WRITE) != 0 &&
2572 (old_prot & VM_PROT_WRITE) == 0)
2573 vm_fault_copy_entry(map, map, current, current, NULL);
2574 
2575 /*
2576 * When restricting access, update the physical map. Worry
2577 * about copy-on-write here.
2578 */
2579 if ((old_prot & ~current->protection) != 0) {
2580 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2581 VM_PROT_ALL)
2582 pmap_protect(map->pmap, current->start,
2583 current->end,
2584 current->protection & MASK(current));
2585 #undef MASK
2586 }
2587 }
2588 vm_map_try_merge_entries(map, current->prev, current);
2589 vm_map_unlock(map);
2590 return (rv);
2591 }
2592 
2593 /*
2594 * vm_map_madvise:
2595 *
2596 * This routine traverses a process's map, handling the madvise
2597 * system call. Advisories are classified as either those affecting
2598 * the vm_map_entry structure or those affecting the underlying
2599 * objects.
2600 */
2601 int
2602 vm_map_madvise(
2603 vm_map_t map,
2604 vm_offset_t start,
2605 vm_offset_t end,
2606 int behav)
2607 {
2608 vm_map_entry_t current, entry;
2609 bool modify_map;
2610 
2611 /*
2612 * Some madvise calls directly modify the vm_map_entry, in which case
2613 * we need to use an exclusive lock on the map and we need to perform
2614 * various clipping operations. Otherwise we only need a read-lock
2615 * on the map.
2616 */
2617 switch (behav) {
2618 case MADV_NORMAL:
2619 case MADV_SEQUENTIAL:
2620 case MADV_RANDOM:
2621 case MADV_NOSYNC:
2622 case MADV_AUTOSYNC:
2623 case MADV_NOCORE:
2624 case MADV_CORE:
2625 if (start == end)
2626 return (0);
2627 modify_map = true;
2628 vm_map_lock(map);
2629 break;
2630 case MADV_WILLNEED:
2631 case MADV_DONTNEED:
2632 case MADV_FREE:
2633 if (start == end)
2634 return (0);
2635 modify_map = false;
2636 vm_map_lock_read(map);
2637 break;
2638 default:
2639 return (EINVAL);
2640 }
2641 
2642 /*
2643 * Locate starting entry and clip if necessary.
2644 */
2645 VM_MAP_RANGE_CHECK(map, start, end);
2646 
2647 if (vm_map_lookup_entry(map, start, &entry)) {
2648 if (modify_map)
2649 vm_map_clip_start(map, entry, start);
2650 } else {
2651 entry = entry->next;
2652 }
2653 
2654 if (modify_map) {
2655 /*
2656 * madvise behaviors that are implemented in the vm_map_entry.
2657 *
2658 * We clip the vm_map_entry so that behavioral changes are
2659 * limited to the specified address range.
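 *
 * (Hypothetical example: madvise(addr, len, MADV_NOSYNC) over the
 * middle of a large entry first clips that entry at addr and at
 * addr + len, so only the middle piece gets MAP_ENTRY_NOSYNC.)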
2660 */ 2661 for (current = entry; current->start < end; 2662 current = current->next) { 2663 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2664 continue; 2665 2666 vm_map_clip_end(map, current, end); 2667 2668 switch (behav) { 2669 case MADV_NORMAL: 2670 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2671 break; 2672 case MADV_SEQUENTIAL: 2673 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2674 break; 2675 case MADV_RANDOM: 2676 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2677 break; 2678 case MADV_NOSYNC: 2679 current->eflags |= MAP_ENTRY_NOSYNC; 2680 break; 2681 case MADV_AUTOSYNC: 2682 current->eflags &= ~MAP_ENTRY_NOSYNC; 2683 break; 2684 case MADV_NOCORE: 2685 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2686 break; 2687 case MADV_CORE: 2688 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2689 break; 2690 default: 2691 break; 2692 } 2693 vm_map_try_merge_entries(map, current->prev, current); 2694 } 2695 vm_map_try_merge_entries(map, current->prev, current); 2696 vm_map_unlock(map); 2697 } else { 2698 vm_pindex_t pstart, pend; 2699 2700 /* 2701 * madvise behaviors that are implemented in the underlying 2702 * vm_object. 2703 * 2704 * Since we don't clip the vm_map_entry, we have to clip 2705 * the vm_object pindex and count. 2706 */ 2707 for (current = entry; current->start < end; 2708 current = current->next) { 2709 vm_offset_t useEnd, useStart; 2710 2711 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2712 continue; 2713 2714 /* 2715 * MADV_FREE would otherwise rewind time to 2716 * the creation of the shadow object. Because 2717 * we hold the VM map read-locked, neither the 2718 * entry's object nor the presence of a 2719 * backing object can change. 2720 */ 2721 if (behav == MADV_FREE && 2722 current->object.vm_object != NULL && 2723 current->object.vm_object->backing_object != NULL) 2724 continue; 2725 2726 pstart = OFF_TO_IDX(current->offset); 2727 pend = pstart + atop(current->end - current->start); 2728 useStart = current->start; 2729 useEnd = current->end; 2730 2731 if (current->start < start) { 2732 pstart += atop(start - current->start); 2733 useStart = start; 2734 } 2735 if (current->end > end) { 2736 pend -= atop(current->end - end); 2737 useEnd = end; 2738 } 2739 2740 if (pstart >= pend) 2741 continue; 2742 2743 /* 2744 * Perform the pmap_advise() before clearing 2745 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2746 * concurrent pmap operation, such as pmap_remove(), 2747 * could clear a reference in the pmap and set 2748 * PGA_REFERENCED on the page before the pmap_advise() 2749 * had completed. Consequently, the page would appear 2750 * referenced based upon an old reference that 2751 * occurred before this pmap_advise() ran. 2752 */ 2753 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2754 pmap_advise(map->pmap, useStart, useEnd, 2755 behav); 2756 2757 vm_object_madvise(current->object.vm_object, pstart, 2758 pend, behav); 2759 2760 /* 2761 * Pre-populate paging structures in the 2762 * WILLNEED case. For wired entries, the 2763 * paging structures are already populated. 2764 */ 2765 if (behav == MADV_WILLNEED && 2766 current->wired_count == 0) { 2767 vm_map_pmap_enter(map, 2768 useStart, 2769 current->protection, 2770 current->object.vm_object, 2771 pstart, 2772 ptoa(pend - pstart), 2773 MAP_PREFAULT_MADVISE 2774 ); 2775 } 2776 } 2777 vm_map_unlock_read(map); 2778 } 2779 return (0); 2780 } 2781 2782 2783 /* 2784 * vm_map_inherit: 2785 * 2786 * Sets the inheritance of the specified address 2787 * range in the target map. 
Inheritance
2788 * affects how the map will be shared with
2789 * child maps at the time of vmspace_fork.
2790 */
2791 int
2792 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2793 vm_inherit_t new_inheritance)
2794 {
2795 vm_map_entry_t entry;
2796 vm_map_entry_t temp_entry;
2797 
2798 switch (new_inheritance) {
2799 case VM_INHERIT_NONE:
2800 case VM_INHERIT_COPY:
2801 case VM_INHERIT_SHARE:
2802 case VM_INHERIT_ZERO:
2803 break;
2804 default:
2805 return (KERN_INVALID_ARGUMENT);
2806 }
2807 if (start == end)
2808 return (KERN_SUCCESS);
2809 vm_map_lock(map);
2810 VM_MAP_RANGE_CHECK(map, start, end);
2811 if (vm_map_lookup_entry(map, start, &temp_entry)) {
2812 entry = temp_entry;
2813 vm_map_clip_start(map, entry, start);
2814 } else
2815 entry = temp_entry->next;
2816 while (entry->start < end) {
2817 vm_map_clip_end(map, entry, end);
2818 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2819 new_inheritance != VM_INHERIT_ZERO)
2820 entry->inheritance = new_inheritance;
2821 vm_map_try_merge_entries(map, entry->prev, entry);
2822 entry = entry->next;
2823 }
2824 vm_map_try_merge_entries(map, entry->prev, entry);
2825 vm_map_unlock(map);
2826 return (KERN_SUCCESS);
2827 }
2828 
2829 /*
2830 * vm_map_entry_in_transition:
2831 *
2832 * Release the map lock, and sleep until the entry is no longer in
2833 * transition. Wake up and reacquire the map lock. If the map changed
2834 * while another held the lock, look up a possibly-changed entry at or
2835 * after the 'start' position of the old entry.
2836 */
2837 static vm_map_entry_t
2838 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
2839 vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
2840 {
2841 vm_map_entry_t entry;
2842 vm_offset_t start;
2843 u_int last_timestamp;
2844 
2845 VM_MAP_ASSERT_LOCKED(map);
2846 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2847 ("not in-transition map entry %p", in_entry));
2848 /*
2849 * We have not yet clipped the entry.
2850 */
2851 start = MAX(in_start, in_entry->start);
2852 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2853 last_timestamp = map->timestamp;
2854 if (vm_map_unlock_and_wait(map, 0)) {
2855 /*
2856 * Allow interruption of user wiring/unwiring?
2857 */
2858 }
2859 vm_map_lock(map);
2860 if (last_timestamp + 1 == map->timestamp)
2861 return (in_entry);
2862 
2863 /*
2864 * Look again for the entry because the map was modified while it was
2865 * unlocked. Specifically, the entry may have been clipped, merged, or
2866 * deleted.
2867 */
2868 if (!vm_map_lookup_entry(map, start, &entry)) {
2869 if (!holes_ok) {
2870 *io_end = start;
2871 return (NULL);
2872 }
2873 entry = entry->next;
2874 }
2875 return (entry);
2876 }
2877 
2878 /*
2879 * vm_map_unwire:
2880 *
2881 * Implements both kernel and user unwiring.
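 *
 * A sketch of a typical user unwiring, as on the munlock(2) path
 * (hypothetical call site, not from this file):
 *
 *	vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);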
2882 */
2883 int
2884 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2885 int flags)
2886 {
2887 vm_map_entry_t entry, first_entry;
2888 int rv;
2889 bool first_iteration, holes_ok, need_wakeup, user_unwire;
2890 
2891 if (start == end)
2892 return (KERN_SUCCESS);
2893 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
2894 user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
2895 vm_map_lock(map);
2896 VM_MAP_RANGE_CHECK(map, start, end);
2897 if (!vm_map_lookup_entry(map, start, &first_entry)) {
2898 if (holes_ok)
2899 first_entry = first_entry->next;
2900 else {
2901 vm_map_unlock(map);
2902 return (KERN_INVALID_ADDRESS);
2903 }
2904 }
2905 first_iteration = true;
2906 entry = first_entry;
2907 rv = KERN_SUCCESS;
2908 while (entry->start < end) {
2909 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2910 /*
2911 * We have not yet clipped the entry.
2912 */
2913 entry = vm_map_entry_in_transition(map, start, &end,
2914 holes_ok, entry);
2915 if (entry == NULL) {
2916 if (first_iteration) {
2917 vm_map_unlock(map);
2918 return (KERN_INVALID_ADDRESS);
2919 }
2920 rv = KERN_INVALID_ADDRESS;
2921 break;
2922 }
2923 first_entry = first_iteration ? entry : NULL;
2924 continue;
2925 }
2926 first_iteration = false;
2927 vm_map_clip_start(map, entry, start);
2928 vm_map_clip_end(map, entry, end);
2929 /*
2930 * Mark the entry in case the map lock is released. (See
2931 * above.)
2932 */
2933 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2934 entry->wiring_thread == NULL,
2935 ("owned map entry %p", entry));
2936 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2937 entry->wiring_thread = curthread;
2938 /*
2939 * Check the map for holes in the specified region.
2940 * If holes_ok, skip this check.
2941 */
2942 if (!holes_ok &&
2943 (entry->end < end && entry->next->start > entry->end)) {
2944 end = entry->end;
2945 rv = KERN_INVALID_ADDRESS;
2946 break;
2947 }
2948 /*
2949 * If system unwiring, require that the entry is system wired.
2950 */
2951 if (!user_unwire &&
2952 vm_map_entry_system_wired_count(entry) == 0) {
2953 end = entry->end;
2954 rv = KERN_INVALID_ARGUMENT;
2955 break;
2956 }
2957 entry = entry->next;
2958 }
2959 need_wakeup = false;
2960 if (first_entry == NULL &&
2961 !vm_map_lookup_entry(map, start, &first_entry)) {
2962 KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
2963 first_entry = first_entry->next;
2964 }
2965 for (entry = first_entry; entry->start < end; entry = entry->next) {
2966 /*
2967 * If holes_ok was specified, an empty
2968 * space in the unwired region could have been mapped
2969 * while the map lock was dropped for draining
2970 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread
2971 * could be simultaneously wiring this new mapping
2972 * entry. Detect these cases and skip any entries
2973 * not marked as in transition by us.
2974 */
2975 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2976 entry->wiring_thread != curthread) {
2977 KASSERT(holes_ok,
2978 ("vm_map_unwire: !HOLESOK and new/changed entry"));
2979 continue;
2980 }
2981 
2982 if (rv == KERN_SUCCESS && (!user_unwire ||
2983 (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2984 if (entry->wired_count == 1)
2985 vm_map_entry_unwire(map, entry);
2986 else
2987 entry->wired_count--;
2988 if (user_unwire)
2989 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2990 }
2991 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2992 ("vm_map_unwire: in-transition flag missing %p", entry));
2993 KASSERT(entry->wiring_thread == curthread,
2994 ("vm_map_unwire: alien wire %p", entry));
2995 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2996 entry->wiring_thread = NULL;
2997 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2998 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2999 need_wakeup = true;
3000 }
3001 vm_map_try_merge_entries(map, entry->prev, entry);
3002 }
3003 vm_map_try_merge_entries(map, entry->prev, entry);
3004 vm_map_unlock(map);
3005 if (need_wakeup)
3006 vm_map_wakeup(map);
3007 return (rv);
3008 }
3009 
3010 static void
3011 vm_map_wire_user_count_sub(u_long npages)
3012 {
3013 
3014 atomic_subtract_long(&vm_user_wire_count, npages);
3015 }
3016 
3017 static bool
3018 vm_map_wire_user_count_add(u_long npages)
3019 {
3020 u_long wired;
3021 
3022 wired = vm_user_wire_count;
3023 do {
3024 if (npages + wired > vm_page_max_user_wired)
3025 return (false);
3026 } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
3027 npages + wired));
3028 
3029 return (true);
3030 }
3031 
3032 /*
3033 * vm_map_wire_entry_failure:
3034 *
3035 * Handle a wiring failure on the given entry.
3036 *
3037 * The map should be locked.
3038 */
3039 static void
3040 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3041 vm_offset_t failed_addr)
3042 {
3043 
3044 VM_MAP_ASSERT_LOCKED(map);
3045 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3046 entry->wired_count == 1,
3047 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3048 KASSERT(failed_addr < entry->end,
3049 ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3050 
3051 /*
3052 * If any pages at the start of this entry were successfully wired,
3053 * then unwire them.
3054 */
3055 if (failed_addr > entry->start) {
3056 pmap_unwire(map->pmap, entry->start, failed_addr);
3057 vm_object_unwire(entry->object.vm_object, entry->offset,
3058 failed_addr - entry->start, PQ_ACTIVE);
3059 }
3060 
3061 /*
3062 * Assign an out-of-range value to represent the failure to wire this
3063 * entry.
3064 */
3065 entry->wired_count = -1;
3066 }
3067 
3068 int
3069 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3070 {
3071 int rv;
3072 
3073 vm_map_lock(map);
3074 rv = vm_map_wire_locked(map, start, end, flags);
3075 vm_map_unlock(map);
3076 return (rv);
3077 }
3078 
3079 
3080 /*
3081 * vm_map_wire_locked:
3082 *
3083 * Implements both kernel and user wiring. Returns with the map locked;
3084 * however, the map lock may have been dropped and reacquired in the interim.
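 *
 * A sketch of a system wiring, as on the vslock(9) path (hypothetical
 * call site, not from this file):
 *
 *	vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);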
3085 */ 3086 int 3087 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3088 { 3089 vm_map_entry_t entry, first_entry, tmp_entry; 3090 vm_offset_t faddr, saved_end, saved_start; 3091 u_long npages; 3092 u_int last_timestamp; 3093 int rv; 3094 bool first_iteration, holes_ok, need_wakeup, user_wire; 3095 vm_prot_t prot; 3096 3097 VM_MAP_ASSERT_LOCKED(map); 3098 3099 if (start == end) 3100 return (KERN_SUCCESS); 3101 prot = 0; 3102 if (flags & VM_MAP_WIRE_WRITE) 3103 prot |= VM_PROT_WRITE; 3104 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3105 user_wire = (flags & VM_MAP_WIRE_USER) != 0; 3106 VM_MAP_RANGE_CHECK(map, start, end); 3107 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3108 if (holes_ok) 3109 first_entry = first_entry->next; 3110 else 3111 return (KERN_INVALID_ADDRESS); 3112 } 3113 first_iteration = true; 3114 entry = first_entry; 3115 while (entry->start < end) { 3116 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3117 /* 3118 * We have not yet clipped the entry. 3119 */ 3120 entry = vm_map_entry_in_transition(map, start, &end, 3121 holes_ok, entry); 3122 if (entry == NULL) { 3123 if (first_iteration) 3124 return (KERN_INVALID_ADDRESS); 3125 rv = KERN_INVALID_ADDRESS; 3126 goto done; 3127 } 3128 first_entry = first_iteration ? entry : NULL; 3129 continue; 3130 } 3131 first_iteration = false; 3132 vm_map_clip_start(map, entry, start); 3133 vm_map_clip_end(map, entry, end); 3134 /* 3135 * Mark the entry in case the map lock is released. (See 3136 * above.) 3137 */ 3138 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3139 entry->wiring_thread == NULL, 3140 ("owned map entry %p", entry)); 3141 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3142 entry->wiring_thread = curthread; 3143 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3144 || (entry->protection & prot) != prot) { 3145 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3146 if (!holes_ok) { 3147 end = entry->end; 3148 rv = KERN_INVALID_ADDRESS; 3149 goto done; 3150 } 3151 } else if (entry->wired_count == 0) { 3152 entry->wired_count++; 3153 3154 npages = atop(entry->end - entry->start); 3155 if (user_wire && !vm_map_wire_user_count_add(npages)) { 3156 vm_map_wire_entry_failure(map, entry, 3157 entry->start); 3158 end = entry->end; 3159 rv = KERN_RESOURCE_SHORTAGE; 3160 goto done; 3161 } 3162 3163 /* 3164 * Release the map lock, relying on the in-transition 3165 * mark. Mark the map busy for fork. 3166 */ 3167 saved_start = entry->start; 3168 saved_end = entry->end; 3169 last_timestamp = map->timestamp; 3170 vm_map_busy(map); 3171 vm_map_unlock(map); 3172 3173 faddr = saved_start; 3174 do { 3175 /* 3176 * Simulate a fault to get the page and enter 3177 * it into the physical map. 3178 */ 3179 if ((rv = vm_fault(map, faddr, 3180 VM_PROT_NONE, VM_FAULT_WIRE, NULL)) != 3181 KERN_SUCCESS) 3182 break; 3183 } while ((faddr += PAGE_SIZE) < saved_end); 3184 vm_map_lock(map); 3185 vm_map_unbusy(map); 3186 if (last_timestamp + 1 != map->timestamp) { 3187 /* 3188 * Look again for the entry because the map was 3189 * modified while it was unlocked. The entry 3190 * may have been clipped, but NOT merged or 3191 * deleted. 
3192 */ 3193 if (!vm_map_lookup_entry(map, saved_start, 3194 &tmp_entry)) 3195 KASSERT(false, 3196 ("vm_map_wire: lookup failed")); 3197 if (entry == first_entry) 3198 first_entry = tmp_entry; 3199 else 3200 first_entry = NULL; 3201 entry = tmp_entry; 3202 while (entry->end < saved_end) { 3203 /* 3204 * In case of failure, handle entries 3205 * that were not fully wired here; 3206 * fully wired entries are handled 3207 * later. 3208 */ 3209 if (rv != KERN_SUCCESS && 3210 faddr < entry->end) 3211 vm_map_wire_entry_failure(map, 3212 entry, faddr); 3213 entry = entry->next; 3214 } 3215 } 3216 if (rv != KERN_SUCCESS) { 3217 vm_map_wire_entry_failure(map, entry, faddr); 3218 if (user_wire) 3219 vm_map_wire_user_count_sub(npages); 3220 end = entry->end; 3221 goto done; 3222 } 3223 } else if (!user_wire || 3224 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3225 entry->wired_count++; 3226 } 3227 /* 3228 * Check the map for holes in the specified region. 3229 * If holes_ok was specified, skip this check. 3230 */ 3231 if (!holes_ok && 3232 entry->end < end && entry->next->start > entry->end) { 3233 end = entry->end; 3234 rv = KERN_INVALID_ADDRESS; 3235 goto done; 3236 } 3237 entry = entry->next; 3238 } 3239 rv = KERN_SUCCESS; 3240 done: 3241 need_wakeup = false; 3242 if (first_entry == NULL && 3243 !vm_map_lookup_entry(map, start, &first_entry)) { 3244 KASSERT(holes_ok, ("vm_map_wire: lookup failed")); 3245 first_entry = first_entry->next; 3246 } 3247 for (entry = first_entry; entry->start < end; entry = entry->next) { 3248 /* 3249 * If holes_ok was specified, an empty 3250 * space in the unwired region could have been mapped 3251 * while the map lock was dropped for faulting in the 3252 * pages or draining MAP_ENTRY_IN_TRANSITION. 3253 * Moreover, another thread could be simultaneously 3254 * wiring this new mapping entry. Detect these cases 3255 * and skip any entries marked as in transition not by us. 3256 */ 3257 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3258 entry->wiring_thread != curthread) { 3259 KASSERT(holes_ok, 3260 ("vm_map_wire: !HOLESOK and new/changed entry")); 3261 continue; 3262 } 3263 3264 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { 3265 /* do nothing */ 3266 } else if (rv == KERN_SUCCESS) { 3267 if (user_wire) 3268 entry->eflags |= MAP_ENTRY_USER_WIRED; 3269 } else if (entry->wired_count == -1) { 3270 /* 3271 * Wiring failed on this entry. Thus, unwiring is 3272 * unnecessary. 3273 */ 3274 entry->wired_count = 0; 3275 } else if (!user_wire || 3276 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3277 /* 3278 * Undo the wiring. Wiring succeeded on this entry 3279 * but failed on a later entry. 
3280 */ 3281 if (entry->wired_count == 1) { 3282 vm_map_entry_unwire(map, entry); 3283 if (user_wire) 3284 vm_map_wire_user_count_sub( 3285 atop(entry->end - entry->start)); 3286 } else 3287 entry->wired_count--; 3288 } 3289 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3290 ("vm_map_wire: in-transition flag missing %p", entry)); 3291 KASSERT(entry->wiring_thread == curthread, 3292 ("vm_map_wire: alien wire %p", entry)); 3293 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3294 MAP_ENTRY_WIRE_SKIPPED); 3295 entry->wiring_thread = NULL; 3296 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3297 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3298 need_wakeup = true; 3299 } 3300 vm_map_try_merge_entries(map, entry->prev, entry); 3301 } 3302 vm_map_try_merge_entries(map, entry->prev, entry); 3303 if (need_wakeup) 3304 vm_map_wakeup(map); 3305 return (rv); 3306 } 3307 3308 /* 3309 * vm_map_sync 3310 * 3311 * Push any dirty cached pages in the address range to their pager. 3312 * If syncio is TRUE, dirty pages are written synchronously. 3313 * If invalidate is TRUE, any cached pages are freed as well. 3314 * 3315 * If the size of the region from start to end is zero, we are 3316 * supposed to flush all modified pages within the region containing 3317 * start. Unfortunately, a region can be split or coalesced with 3318 * neighboring regions, making it difficult to determine what the 3319 * original region was. Therefore, we approximate this requirement by 3320 * flushing the current region containing start. 3321 * 3322 * Returns an error if any part of the specified range is not mapped. 3323 */ 3324 int 3325 vm_map_sync( 3326 vm_map_t map, 3327 vm_offset_t start, 3328 vm_offset_t end, 3329 boolean_t syncio, 3330 boolean_t invalidate) 3331 { 3332 vm_map_entry_t current; 3333 vm_map_entry_t entry; 3334 vm_size_t size; 3335 vm_object_t object; 3336 vm_ooffset_t offset; 3337 unsigned int last_timestamp; 3338 boolean_t failed; 3339 3340 vm_map_lock_read(map); 3341 VM_MAP_RANGE_CHECK(map, start, end); 3342 if (!vm_map_lookup_entry(map, start, &entry)) { 3343 vm_map_unlock_read(map); 3344 return (KERN_INVALID_ADDRESS); 3345 } else if (start == end) { 3346 start = entry->start; 3347 end = entry->end; 3348 } 3349 /* 3350 * Make a first pass to check for user-wired memory and holes. 3351 */ 3352 for (current = entry; current->start < end; current = current->next) { 3353 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 3354 vm_map_unlock_read(map); 3355 return (KERN_INVALID_ARGUMENT); 3356 } 3357 if (end > current->end && 3358 current->end != current->next->start) { 3359 vm_map_unlock_read(map); 3360 return (KERN_INVALID_ADDRESS); 3361 } 3362 } 3363 3364 if (invalidate) 3365 pmap_remove(map->pmap, start, end); 3366 failed = FALSE; 3367 3368 /* 3369 * Make a second pass, cleaning/uncaching pages from the indicated 3370 * objects as we go. 3371 */ 3372 for (current = entry; current->start < end;) { 3373 offset = current->offset + (start - current->start); 3374 size = (end <= current->end ? 
end : current->end) - start;
3375 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
3376 vm_map_t smap;
3377 vm_map_entry_t tentry;
3378 vm_size_t tsize;
3379 
3380 smap = current->object.sub_map;
3381 vm_map_lock_read(smap);
3382 (void) vm_map_lookup_entry(smap, offset, &tentry);
3383 tsize = tentry->end - offset;
3384 if (tsize < size)
3385 size = tsize;
3386 object = tentry->object.vm_object;
3387 offset = tentry->offset + (offset - tentry->start);
3388 vm_map_unlock_read(smap);
3389 } else {
3390 object = current->object.vm_object;
3391 }
3392 vm_object_reference(object);
3393 last_timestamp = map->timestamp;
3394 vm_map_unlock_read(map);
3395 if (!vm_object_sync(object, offset, size, syncio, invalidate))
3396 failed = TRUE;
3397 start += size;
3398 vm_object_deallocate(object);
3399 vm_map_lock_read(map);
3400 if (last_timestamp == map->timestamp ||
3401 !vm_map_lookup_entry(map, start, &current))
3402 current = current->next;
3403 }
3404 
3405 vm_map_unlock_read(map);
3406 return (failed ? KERN_FAILURE : KERN_SUCCESS);
3407 }
3408 
3409 /*
3410 * vm_map_entry_unwire: [ internal use only ]
3411 *
3412 * Make the region specified by this entry pageable.
3413 *
3414 * The map in question should be locked.
3415 * [This is the reason for this routine's existence.]
3416 */
3417 static void
3418 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3419 {
3420 vm_size_t size;
3421 
3422 VM_MAP_ASSERT_LOCKED(map);
3423 KASSERT(entry->wired_count > 0,
3424 ("vm_map_entry_unwire: entry %p isn't wired", entry));
3425 
3426 size = entry->end - entry->start;
3427 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3428 vm_map_wire_user_count_sub(atop(size));
3429 pmap_unwire(map->pmap, entry->start, entry->end);
3430 vm_object_unwire(entry->object.vm_object, entry->offset, size,
3431 PQ_ACTIVE);
3432 entry->wired_count = 0;
3433 }
3434 
3435 static void
3436 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3437 {
3438 
3439 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3440 vm_object_deallocate(entry->object.vm_object);
3441 uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3442 }
3443 
3444 /*
3445 * vm_map_entry_delete: [ internal use only ]
3446 *
3447 * Deallocate the given entry from the target map.
3448 */ 3449 static void 3450 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3451 { 3452 vm_object_t object; 3453 vm_pindex_t offidxstart, offidxend, count, size1; 3454 vm_size_t size; 3455 3456 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3457 object = entry->object.vm_object; 3458 3459 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3460 MPASS(entry->cred == NULL); 3461 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3462 MPASS(object == NULL); 3463 vm_map_entry_deallocate(entry, map->system_map); 3464 return; 3465 } 3466 3467 size = entry->end - entry->start; 3468 map->size -= size; 3469 3470 if (entry->cred != NULL) { 3471 swap_release_by_cred(size, entry->cred); 3472 crfree(entry->cred); 3473 } 3474 3475 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 3476 (object != NULL)) { 3477 KASSERT(entry->cred == NULL || object->cred == NULL || 3478 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3479 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3480 count = atop(size); 3481 offidxstart = OFF_TO_IDX(entry->offset); 3482 offidxend = offidxstart + count; 3483 VM_OBJECT_WLOCK(object); 3484 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 3485 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 3486 object == kernel_object)) { 3487 vm_object_collapse(object); 3488 3489 /* 3490 * The option OBJPR_NOTMAPPED can be passed here 3491 * because vm_map_delete() already performed 3492 * pmap_remove() on the only mapping to this range 3493 * of pages. 3494 */ 3495 vm_object_page_remove(object, offidxstart, offidxend, 3496 OBJPR_NOTMAPPED); 3497 if (object->type == OBJT_SWAP) 3498 swap_pager_freespace(object, offidxstart, 3499 count); 3500 if (offidxend >= object->size && 3501 offidxstart < object->size) { 3502 size1 = object->size; 3503 object->size = offidxstart; 3504 if (object->cred != NULL) { 3505 size1 -= object->size; 3506 KASSERT(object->charge >= ptoa(size1), 3507 ("object %p charge < 0", object)); 3508 swap_release_by_cred(ptoa(size1), 3509 object->cred); 3510 object->charge -= ptoa(size1); 3511 } 3512 } 3513 } 3514 VM_OBJECT_WUNLOCK(object); 3515 } else 3516 entry->object.vm_object = NULL; 3517 if (map->system_map) 3518 vm_map_entry_deallocate(entry, TRUE); 3519 else { 3520 entry->next = curthread->td_map_def_user; 3521 curthread->td_map_def_user = entry; 3522 } 3523 } 3524 3525 /* 3526 * vm_map_delete: [ internal use only ] 3527 * 3528 * Deallocates the given address range from the target 3529 * map. 3530 */ 3531 int 3532 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3533 { 3534 vm_map_entry_t entry; 3535 vm_map_entry_t first_entry; 3536 3537 VM_MAP_ASSERT_LOCKED(map); 3538 if (start == end) 3539 return (KERN_SUCCESS); 3540 3541 /* 3542 * Find the start of the region, and clip it 3543 */ 3544 if (!vm_map_lookup_entry(map, start, &first_entry)) 3545 entry = first_entry->next; 3546 else { 3547 entry = first_entry; 3548 vm_map_clip_start(map, entry, start); 3549 } 3550 3551 /* 3552 * Step through all entries in this region 3553 */ 3554 while (entry->start < end) { 3555 vm_map_entry_t next; 3556 3557 /* 3558 * Wait for wiring or unwiring of an entry to complete. 3559 * Also wait for any system wirings to disappear on 3560 * user maps. 
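 *
 * (For instance, a range system-wired by vslock(9), as when sysctl(9)
 * wires a user buffer, must be unwired by vsunlock(9) before its
 * entries can be deleted here.)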
3561 */ 3562 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3563 (vm_map_pmap(map) != kernel_pmap && 3564 vm_map_entry_system_wired_count(entry) != 0)) { 3565 unsigned int last_timestamp; 3566 vm_offset_t saved_start; 3567 vm_map_entry_t tmp_entry; 3568 3569 saved_start = entry->start; 3570 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3571 last_timestamp = map->timestamp; 3572 (void) vm_map_unlock_and_wait(map, 0); 3573 vm_map_lock(map); 3574 if (last_timestamp + 1 != map->timestamp) { 3575 /* 3576 * Look again for the entry because the map was 3577 * modified while it was unlocked. 3578 * Specifically, the entry may have been 3579 * clipped, merged, or deleted. 3580 */ 3581 if (!vm_map_lookup_entry(map, saved_start, 3582 &tmp_entry)) 3583 entry = tmp_entry->next; 3584 else { 3585 entry = tmp_entry; 3586 vm_map_clip_start(map, entry, 3587 saved_start); 3588 } 3589 } 3590 continue; 3591 } 3592 vm_map_clip_end(map, entry, end); 3593 3594 next = entry->next; 3595 3596 /* 3597 * Unwire before removing addresses from the pmap; otherwise, 3598 * unwiring will put the entries back in the pmap. 3599 */ 3600 if (entry->wired_count != 0) 3601 vm_map_entry_unwire(map, entry); 3602 3603 /* 3604 * Remove mappings for the pages, but only if the 3605 * mappings could exist. For instance, it does not 3606 * make sense to call pmap_remove() for guard entries. 3607 */ 3608 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 3609 entry->object.vm_object != NULL) 3610 pmap_remove(map->pmap, entry->start, entry->end); 3611 3612 if (entry->end == map->anon_loc) 3613 map->anon_loc = entry->start; 3614 3615 /* 3616 * Delete the entry only after removing all pmap 3617 * entries pointing to its pages. (Otherwise, its 3618 * page frames may be reallocated, and any modify bits 3619 * will be set in the wrong object!) 3620 */ 3621 vm_map_entry_delete(map, entry); 3622 entry = next; 3623 } 3624 return (KERN_SUCCESS); 3625 } 3626 3627 /* 3628 * vm_map_remove: 3629 * 3630 * Remove the given address range from the target map. 3631 * This is the exported form of vm_map_delete. 3632 */ 3633 int 3634 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3635 { 3636 int result; 3637 3638 vm_map_lock(map); 3639 VM_MAP_RANGE_CHECK(map, start, end); 3640 result = vm_map_delete(map, start, end); 3641 vm_map_unlock(map); 3642 return (result); 3643 } 3644 3645 /* 3646 * vm_map_check_protection: 3647 * 3648 * Assert that the target map allows the specified privilege on the 3649 * entire address region given. The entire region must be allocated. 3650 * 3651 * WARNING! This code does not and should not check whether the 3652 * contents of the region is accessible. For example a smaller file 3653 * might be mapped into a larger address space. 3654 * 3655 * NOTE! This code is also called by munmap(). 3656 * 3657 * The map must be locked. A read lock is sufficient. 3658 */ 3659 boolean_t 3660 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3661 vm_prot_t protection) 3662 { 3663 vm_map_entry_t entry; 3664 vm_map_entry_t tmp_entry; 3665 3666 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3667 return (FALSE); 3668 entry = tmp_entry; 3669 3670 while (start < end) { 3671 /* 3672 * No holes allowed! 3673 */ 3674 if (start < entry->start) 3675 return (FALSE); 3676 /* 3677 * Check protection associated with entry. 
3678 */ 3679 if ((entry->protection & protection) != protection) 3680 return (FALSE); 3681 /* go to next entry */ 3682 start = entry->end; 3683 entry = entry->next; 3684 } 3685 return (TRUE); 3686 } 3687 3688 /* 3689 * vm_map_copy_entry: 3690 * 3691 * Copies the contents of the source entry to the destination 3692 * entry. The entries *must* be aligned properly. 3693 */ 3694 static void 3695 vm_map_copy_entry( 3696 vm_map_t src_map, 3697 vm_map_t dst_map, 3698 vm_map_entry_t src_entry, 3699 vm_map_entry_t dst_entry, 3700 vm_ooffset_t *fork_charge) 3701 { 3702 vm_object_t src_object; 3703 vm_map_entry_t fake_entry; 3704 vm_offset_t size; 3705 struct ucred *cred; 3706 int charged; 3707 3708 VM_MAP_ASSERT_LOCKED(dst_map); 3709 3710 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3711 return; 3712 3713 if (src_entry->wired_count == 0 || 3714 (src_entry->protection & VM_PROT_WRITE) == 0) { 3715 /* 3716 * If the source entry is marked needs_copy, it is already 3717 * write-protected. 3718 */ 3719 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3720 (src_entry->protection & VM_PROT_WRITE) != 0) { 3721 pmap_protect(src_map->pmap, 3722 src_entry->start, 3723 src_entry->end, 3724 src_entry->protection & ~VM_PROT_WRITE); 3725 } 3726 3727 /* 3728 * Make a copy of the object. 3729 */ 3730 size = src_entry->end - src_entry->start; 3731 if ((src_object = src_entry->object.vm_object) != NULL) { 3732 VM_OBJECT_WLOCK(src_object); 3733 charged = ENTRY_CHARGED(src_entry); 3734 if (src_object->handle == NULL && 3735 (src_object->type == OBJT_DEFAULT || 3736 src_object->type == OBJT_SWAP)) { 3737 vm_object_collapse(src_object); 3738 if ((src_object->flags & (OBJ_NOSPLIT | 3739 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3740 vm_object_split(src_entry); 3741 src_object = 3742 src_entry->object.vm_object; 3743 } 3744 } 3745 vm_object_reference_locked(src_object); 3746 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3747 if (src_entry->cred != NULL && 3748 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3749 KASSERT(src_object->cred == NULL, 3750 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3751 src_object)); 3752 src_object->cred = src_entry->cred; 3753 src_object->charge = size; 3754 } 3755 VM_OBJECT_WUNLOCK(src_object); 3756 dst_entry->object.vm_object = src_object; 3757 if (charged) { 3758 cred = curthread->td_ucred; 3759 crhold(cred); 3760 dst_entry->cred = cred; 3761 *fork_charge += size; 3762 if (!(src_entry->eflags & 3763 MAP_ENTRY_NEEDS_COPY)) { 3764 crhold(cred); 3765 src_entry->cred = cred; 3766 *fork_charge += size; 3767 } 3768 } 3769 src_entry->eflags |= MAP_ENTRY_COW | 3770 MAP_ENTRY_NEEDS_COPY; 3771 dst_entry->eflags |= MAP_ENTRY_COW | 3772 MAP_ENTRY_NEEDS_COPY; 3773 dst_entry->offset = src_entry->offset; 3774 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { 3775 /* 3776 * MAP_ENTRY_WRITECNT cannot 3777 * indicate write reference from 3778 * src_entry, since the entry is 3779 * marked as needs copy. Allocate a 3780 * fake entry that is used to 3781 * decrement object->un_pager writecount 3782 * at the appropriate time. Attach 3783 * fake_entry to the deferred list. 
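 * The deferred list (curthread->td_map_def_user) is drained by
 * vm_map_process_deferred() once the map locks are dropped, which
 * is when the writecount adjustment actually takes place.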
3784 */ 3785 fake_entry = vm_map_entry_create(dst_map); 3786 fake_entry->eflags = MAP_ENTRY_WRITECNT; 3787 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; 3788 vm_object_reference(src_object); 3789 fake_entry->object.vm_object = src_object; 3790 fake_entry->start = src_entry->start; 3791 fake_entry->end = src_entry->end; 3792 fake_entry->next = curthread->td_map_def_user; 3793 curthread->td_map_def_user = fake_entry; 3794 } 3795 3796 pmap_copy(dst_map->pmap, src_map->pmap, 3797 dst_entry->start, dst_entry->end - dst_entry->start, 3798 src_entry->start); 3799 } else { 3800 dst_entry->object.vm_object = NULL; 3801 dst_entry->offset = 0; 3802 if (src_entry->cred != NULL) { 3803 dst_entry->cred = curthread->td_ucred; 3804 crhold(dst_entry->cred); 3805 *fork_charge += size; 3806 } 3807 } 3808 } else { 3809 /* 3810 * We don't want to make writeable wired pages copy-on-write. 3811 * Immediately copy these pages into the new map by simulating 3812 * page faults. The new pages are pageable. 3813 */ 3814 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3815 fork_charge); 3816 } 3817 } 3818 3819 /* 3820 * vmspace_map_entry_forked: 3821 * Update the newly-forked vmspace each time a map entry is inherited 3822 * or copied. The values for vm_dsize and vm_tsize are approximate 3823 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3824 */ 3825 static void 3826 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3827 vm_map_entry_t entry) 3828 { 3829 vm_size_t entrysize; 3830 vm_offset_t newend; 3831 3832 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3833 return; 3834 entrysize = entry->end - entry->start; 3835 vm2->vm_map.size += entrysize; 3836 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3837 vm2->vm_ssize += btoc(entrysize); 3838 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3839 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3840 newend = MIN(entry->end, 3841 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3842 vm2->vm_dsize += btoc(newend - entry->start); 3843 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3844 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3845 newend = MIN(entry->end, 3846 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3847 vm2->vm_tsize += btoc(newend - entry->start); 3848 } 3849 } 3850 3851 /* 3852 * vmspace_fork: 3853 * Create a new process vmspace structure and vm_map 3854 * based on those of an existing process. The new map 3855 * is based on the old map, according to the inheritance 3856 * values on the regions in that map. 3857 * 3858 * XXX It might be worth coalescing the entries added to the new vmspace. 3859 * 3860 * The source map must not be locked. 3861 */ 3862 struct vmspace * 3863 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3864 { 3865 struct vmspace *vm2; 3866 vm_map_t new_map, old_map; 3867 vm_map_entry_t new_entry, old_entry; 3868 vm_object_t object; 3869 int error, locked; 3870 vm_inherit_t inh; 3871 3872 old_map = &vm1->vm_map; 3873 /* Copy immutable fields of vm1 to vm2. 
	 */
	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
	    pmap_pinit);
	if (vm2 == NULL)
		return (NULL);

	vm2->vm_taddr = vm1->vm_taddr;
	vm2->vm_daddr = vm1->vm_daddr;
	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
	vm_map_lock(old_map);
	if (old_map->busy)
		vm_map_wait_busy(old_map);
	new_map = &vm2->vm_map;
	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
	KASSERT(locked, ("vmspace_fork: lock failed"));

	error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
	if (error != 0) {
		sx_xunlock(&old_map->lock);
		sx_xunlock(&new_map->lock);
		vm_map_process_deferred();
		vmspace_free(vm2);
		return (NULL);
	}

	new_map->anon_loc = old_map->anon_loc;

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vmspace_fork: encountered a submap");

		inh = old_entry->inheritance;
		if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
		    inh != VM_INHERIT_NONE)
			inh = VM_INHERIT_COPY;
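		/*
		 * Example (illustrative): the inheritance mode switched
		 * on below is under user control via minherit(2), e.g.
		 *
		 *	minherit(addr, len, INHERIT_SHARE);
		 *
		 * causes a later fork() to share the region with the
		 * child instead of marking it copy-on-write.
		 */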
3975 */ 3976 new_entry = vm_map_entry_create(new_map); 3977 *new_entry = *old_entry; 3978 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3979 MAP_ENTRY_IN_TRANSITION); 3980 new_entry->wiring_thread = NULL; 3981 new_entry->wired_count = 0; 3982 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { 3983 vm_pager_update_writecount(object, 3984 new_entry->start, new_entry->end); 3985 } 3986 vm_map_entry_set_vnode_text(new_entry, true); 3987 3988 /* 3989 * Insert the entry into the new map -- we know we're 3990 * inserting at the end of the new map. 3991 */ 3992 vm_map_entry_link(new_map, new_entry); 3993 vmspace_map_entry_forked(vm1, vm2, new_entry); 3994 3995 /* 3996 * Update the physical map 3997 */ 3998 pmap_copy(new_map->pmap, old_map->pmap, 3999 new_entry->start, 4000 (old_entry->end - old_entry->start), 4001 old_entry->start); 4002 break; 4003 4004 case VM_INHERIT_COPY: 4005 /* 4006 * Clone the entry and link into the map. 4007 */ 4008 new_entry = vm_map_entry_create(new_map); 4009 *new_entry = *old_entry; 4010 /* 4011 * Copied entry is COW over the old object. 4012 */ 4013 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4014 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT); 4015 new_entry->wiring_thread = NULL; 4016 new_entry->wired_count = 0; 4017 new_entry->object.vm_object = NULL; 4018 new_entry->cred = NULL; 4019 vm_map_entry_link(new_map, new_entry); 4020 vmspace_map_entry_forked(vm1, vm2, new_entry); 4021 vm_map_copy_entry(old_map, new_map, old_entry, 4022 new_entry, fork_charge); 4023 vm_map_entry_set_vnode_text(new_entry, true); 4024 break; 4025 4026 case VM_INHERIT_ZERO: 4027 /* 4028 * Create a new anonymous mapping entry modelled from 4029 * the old one. 4030 */ 4031 new_entry = vm_map_entry_create(new_map); 4032 memset(new_entry, 0, sizeof(*new_entry)); 4033 4034 new_entry->start = old_entry->start; 4035 new_entry->end = old_entry->end; 4036 new_entry->eflags = old_entry->eflags & 4037 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 4038 MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC); 4039 new_entry->protection = old_entry->protection; 4040 new_entry->max_protection = old_entry->max_protection; 4041 new_entry->inheritance = VM_INHERIT_ZERO; 4042 4043 vm_map_entry_link(new_map, new_entry); 4044 vmspace_map_entry_forked(vm1, vm2, new_entry); 4045 4046 new_entry->cred = curthread->td_ucred; 4047 crhold(new_entry->cred); 4048 *fork_charge += (new_entry->end - new_entry->start); 4049 4050 break; 4051 } 4052 old_entry = old_entry->next; 4053 } 4054 /* 4055 * Use inlined vm_map_unlock() to postpone handling the deferred 4056 * map entries, which cannot be done until both old_map and 4057 * new_map locks are released. 4058 */ 4059 sx_xunlock(&old_map->lock); 4060 sx_xunlock(&new_map->lock); 4061 vm_map_process_deferred(); 4062 4063 return (vm2); 4064 } 4065 4066 /* 4067 * Create a process's stack for exec_new_vmspace(). This function is never 4068 * asked to wire the newly created stack. 4069 */ 4070 int 4071 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4072 vm_prot_t prot, vm_prot_t max, int cow) 4073 { 4074 vm_size_t growsize, init_ssize; 4075 rlim_t vmemlim; 4076 int rv; 4077 4078 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4079 growsize = sgrowsiz; 4080 init_ssize = (max_ssize < growsize) ? 
int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_size_t growsize, init_ssize;
	rlim_t vmemlim;
	int rv;

	MPASS((map->flags & MAP_WIREFUTURE) == 0);
	growsize = sgrowsiz;
	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
	vm_map_lock(map);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
	    max, cow);
out:
	vm_map_unlock(map);
	return (rv);
}

static int stack_guard_page = 1;
SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
    &stack_guard_page, 0,
    "Specifies the number of guard pages for a growable stack");
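/*
 * Example (illustrative): since the knob above is declared CTLFLAG_RWTUN,
 * the guard size (in pages) can be set at runtime or as a tunable, e.g.
 *
 *	sysctl security.bsd.stack_guard_page=4
 *
 * A wider guard enlarges the hole kept between a growable stack and its
 * neighboring mapping, at some cost in usable address space.
 */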
4151 */ 4152 if (orient == MAP_STACK_GROWS_DOWN) { 4153 bot = addrbos + max_ssize - init_ssize; 4154 top = bot + init_ssize; 4155 gap_bot = addrbos; 4156 gap_top = bot; 4157 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 4158 bot = addrbos; 4159 top = bot + init_ssize; 4160 gap_bot = top; 4161 gap_top = addrbos + max_ssize; 4162 } 4163 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 4164 if (rv != KERN_SUCCESS) 4165 return (rv); 4166 new_entry = prev_entry->next; 4167 KASSERT(new_entry->end == top || new_entry->start == bot, 4168 ("Bad entry start/end for new stack entry")); 4169 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 4170 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 4171 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 4172 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 4173 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 4174 ("new entry lacks MAP_ENTRY_GROWS_UP")); 4175 if (gap_bot == gap_top) 4176 return (KERN_SUCCESS); 4177 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 4178 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 4179 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 4180 if (rv == KERN_SUCCESS) { 4181 /* 4182 * Gap can never successfully handle a fault, so 4183 * read-ahead logic is never used for it. Re-use 4184 * next_read of the gap entry to store 4185 * stack_guard_page for vm_map_growstack(). 4186 */ 4187 if (orient == MAP_STACK_GROWS_DOWN) 4188 new_entry->prev->next_read = sgp; 4189 else 4190 new_entry->next->next_read = sgp; 4191 } else { 4192 (void)vm_map_delete(map, bot, top); 4193 } 4194 return (rv); 4195 } 4196 4197 /* 4198 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 4199 * successfully grow the stack. 4200 */ 4201 static int 4202 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 4203 { 4204 vm_map_entry_t stack_entry; 4205 struct proc *p; 4206 struct vmspace *vm; 4207 struct ucred *cred; 4208 vm_offset_t gap_end, gap_start, grow_start; 4209 vm_size_t grow_amount, guard, max_grow; 4210 rlim_t lmemlim, stacklim, vmemlim; 4211 int rv, rv1; 4212 bool gap_deleted, grow_down, is_procstack; 4213 #ifdef notyet 4214 uint64_t limit; 4215 #endif 4216 #ifdef RACCT 4217 int error; 4218 #endif 4219 4220 p = curproc; 4221 vm = p->p_vmspace; 4222 4223 /* 4224 * Disallow stack growth when the access is performed by a 4225 * debugger or AIO daemon. The reason is that the wrong 4226 * resource limits are applied. 4227 */ 4228 if (p != initproc && (map != &p->p_vmspace->vm_map || 4229 p->p_textvp == NULL)) 4230 return (KERN_FAILURE); 4231 4232 MPASS(!map->system_map); 4233 4234 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 4235 stacklim = lim_cur(curthread, RLIMIT_STACK); 4236 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4237 retry: 4238 /* If addr is not in a hole for a stack grow area, no need to grow. 
	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
	if (rv != KERN_SUCCESS)
		return (rv);
	new_entry = prev_entry->next;
	KASSERT(new_entry->end == top || new_entry->start == bot,
	    ("Bad entry start/end for new stack entry"));
	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
	if (gap_bot == gap_top)
		return (KERN_SUCCESS);
	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
	if (rv == KERN_SUCCESS) {
		/*
		 * The gap can never successfully handle a fault, so
		 * read-ahead logic is never used for it.  Re-use
		 * next_read of the gap entry to store
		 * stack_guard_page for vm_map_growstack().
		 */
		if (orient == MAP_STACK_GROWS_DOWN)
			new_entry->prev->next_read = sgp;
		else
			new_entry->next->next_read = sgp;
	} else {
		(void)vm_map_delete(map, bot, top);
	}
	return (rv);
}

/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
 * successfully grow the stack.
 */
static int
vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
{
	vm_map_entry_t stack_entry;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	vm_offset_t gap_end, gap_start, grow_start;
	vm_size_t grow_amount, guard, max_grow;
	rlim_t lmemlim, stacklim, vmemlim;
	int rv, rv1;
	bool gap_deleted, grow_down, is_procstack;
#ifdef notyet
	uint64_t limit;
#endif
#ifdef RACCT
	int error;
#endif

	p = curproc;
	vm = p->p_vmspace;

	/*
	 * Disallow stack growth when the access is performed by a
	 * debugger or AIO daemon, because the wrong resource limits
	 * would be applied.
	 */
	if (p != initproc && (map != &p->p_vmspace->vm_map ||
	    p->p_textvp == NULL))
		return (KERN_FAILURE);

	MPASS(!map->system_map);

	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
	stacklim = lim_cur(curthread, RLIMIT_STACK);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
retry:
	/* If addr is not in a hole for a stack grow area, no need to grow. */
	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
		return (KERN_FAILURE);
	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
		return (KERN_SUCCESS);
	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
		stack_entry = gap_entry->next;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
		    stack_entry->start != gap_entry->end)
			return (KERN_FAILURE);
		grow_amount = round_page(stack_entry->start - addr);
		grow_down = true;
	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
		stack_entry = gap_entry->prev;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
		    stack_entry->end != gap_entry->start)
			return (KERN_FAILURE);
		grow_amount = round_page(addr + 1 - stack_entry->end);
		grow_down = false;
	} else {
		return (KERN_FAILURE);
	}
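	/*
	 * Worked example (illustrative): for a grow-down fault 5000 bytes
	 * below the current stack bottom, the request is rounded up to a
	 * whole number of pages, so with 4K pages
	 *
	 *	grow_amount = round_page(5000) = 8192,
	 *
	 * i.e. the stack grows by at least two pages.
	 */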
	guard = (curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ? 0 :
	    gap_entry->next_read;
	max_grow = gap_entry->end - gap_entry->start;
	if (guard > max_grow)
		return (KERN_NO_SPACE);
	max_grow -= guard;
	if (grow_amount > max_grow)
		return (KERN_NO_SPACE);

	/*
	 * If this is the main process stack, see if we're over the stack
	 * limit.
	 */
	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
		return (KERN_NO_SPACE);

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (is_procstack && racct_set(p, RACCT_STACK,
		    ctob(vm->vm_ssize) + grow_amount)) {
			PROC_UNLOCK(p);
			return (KERN_NO_SPACE);
		}
		PROC_UNLOCK(p);
	}
#endif

	grow_amount = roundup(grow_amount, sgrowsiz);
	if (grow_amount > max_grow)
		grow_amount = max_grow;
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
		grow_amount = trunc_page((vm_size_t)stacklim) -
		    ctob(vm->vm_ssize);
	}

#ifdef notyet
	PROC_LOCK(p);
	limit = racct_get_available(p, RACCT_STACK);
	PROC_UNLOCK(p);
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
		grow_amount = limit - ctob(vm->vm_ssize);
#endif

	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
			rv = KERN_NO_SPACE;
			goto out;
		}
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(p);
			if (racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
				PROC_UNLOCK(p);
				rv = KERN_NO_SPACE;
				goto out;
			}
			PROC_UNLOCK(p);
		}
#endif
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
			PROC_UNLOCK(p);
			rv = KERN_NO_SPACE;
			goto out;
		}
		PROC_UNLOCK(p);
	}
#endif

	if (vm_map_lock_upgrade(map)) {
		gap_entry = NULL;
		vm_map_lock_read(map);
		goto retry;
	}

	if (grow_down) {
		grow_start = gap_entry->end - grow_amount;
		if (gap_entry->start + grow_amount == gap_entry->end) {
			gap_start = gap_entry->start;
			gap_end = gap_entry->end;
			vm_map_entry_delete(map, gap_entry);
			gap_deleted = true;
		} else {
			MPASS(gap_entry->start < gap_entry->end - grow_amount);
			vm_map_entry_resize(map, gap_entry, -grow_amount);
			gap_deleted = false;
		}
		rv = vm_map_insert(map, NULL, 0, grow_start,
		    grow_start + grow_amount,
		    stack_entry->protection, stack_entry->max_protection,
		    MAP_STACK_GROWS_DOWN);
		if (rv != KERN_SUCCESS) {
			if (gap_deleted) {
				rv1 = vm_map_insert(map, NULL, 0, gap_start,
				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
				MPASS(rv1 == KERN_SUCCESS);
			} else
				vm_map_entry_resize(map, gap_entry,
				    grow_amount);
		}
	} else {
		grow_start = stack_entry->end;
		cred = stack_entry->cred;
		if (cred == NULL && stack_entry->object.vm_object != NULL)
			cred = stack_entry->object.vm_object->cred;
		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
			rv = KERN_NO_SPACE;
		/* Grow the underlying object if applicable. */
		else if (stack_entry->object.vm_object == NULL ||
		    vm_object_coalesce(stack_entry->object.vm_object,
		    stack_entry->offset,
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    grow_amount, cred != NULL)) {
			if (gap_entry->start + grow_amount == gap_entry->end) {
				vm_map_entry_delete(map, gap_entry);
				vm_map_entry_resize(map, stack_entry,
				    grow_amount);
			} else {
				gap_entry->start += grow_amount;
				stack_entry->end += grow_amount;
			}
			map->size += grow_amount;
			rv = KERN_SUCCESS;
		} else
			rv = KERN_FAILURE;
	}
	if (rv == KERN_SUCCESS && is_procstack)
		vm->vm_ssize += btoc(grow_amount);

	/*
	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
	 */
	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
		rv = vm_map_wire_locked(map, grow_start,
		    grow_start + grow_amount,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	}
	vm_map_lock_downgrade(map);

out:
#ifdef RACCT
	if (racct_enable && rv != KERN_SUCCESS) {
		PROC_LOCK(p);
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		if (!old_mlock) {
			error = racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)));
			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
		PROC_UNLOCK(p);
	}
#endif

	return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace has no
 * mappings; exec is expected to populate it.
 */
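/*
 * Illustrative sketch of the exec-side caller (hedged; see
 * exec_new_vmspace() for the real logic, which may instead reuse a
 * vmspace that has only a single reference):
 *
 *	error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
 *	if (error)
 *		return (error);
 *	map = &p->p_vmspace->vm_map;
 */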
4455 */ 4456 PROC_VMSPACE_LOCK(p); 4457 p->p_vmspace = newvmspace; 4458 PROC_VMSPACE_UNLOCK(p); 4459 if (p == curthread->td_proc) 4460 pmap_activate(curthread); 4461 curthread->td_pflags |= TDP_EXECVMSPC; 4462 return (0); 4463 } 4464 4465 /* 4466 * Unshare the specified VM space for forcing COW. This 4467 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4468 */ 4469 int 4470 vmspace_unshare(struct proc *p) 4471 { 4472 struct vmspace *oldvmspace = p->p_vmspace; 4473 struct vmspace *newvmspace; 4474 vm_ooffset_t fork_charge; 4475 4476 if (oldvmspace->vm_refcnt == 1) 4477 return (0); 4478 fork_charge = 0; 4479 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4480 if (newvmspace == NULL) 4481 return (ENOMEM); 4482 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4483 vmspace_free(newvmspace); 4484 return (ENOMEM); 4485 } 4486 PROC_VMSPACE_LOCK(p); 4487 p->p_vmspace = newvmspace; 4488 PROC_VMSPACE_UNLOCK(p); 4489 if (p == curthread->td_proc) 4490 pmap_activate(curthread); 4491 vmspace_free(oldvmspace); 4492 return (0); 4493 } 4494 4495 /* 4496 * vm_map_lookup: 4497 * 4498 * Finds the VM object, offset, and 4499 * protection for a given virtual address in the 4500 * specified map, assuming a page fault of the 4501 * type specified. 4502 * 4503 * Leaves the map in question locked for read; return 4504 * values are guaranteed until a vm_map_lookup_done 4505 * call is performed. Note that the map argument 4506 * is in/out; the returned map must be used in 4507 * the call to vm_map_lookup_done. 4508 * 4509 * A handle (out_entry) is returned for use in 4510 * vm_map_lookup_done, to make that fast. 4511 * 4512 * If a lookup is requested with "write protection" 4513 * specified, the map may be changed to perform virtual 4514 * copying operations, although the data referenced will 4515 * remain the same. 4516 */ 4517 int 4518 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4519 vm_offset_t vaddr, 4520 vm_prot_t fault_typea, 4521 vm_map_entry_t *out_entry, /* OUT */ 4522 vm_object_t *object, /* OUT */ 4523 vm_pindex_t *pindex, /* OUT */ 4524 vm_prot_t *out_prot, /* OUT */ 4525 boolean_t *wired) /* OUT */ 4526 { 4527 vm_map_entry_t entry; 4528 vm_map_t map = *var_map; 4529 vm_prot_t prot; 4530 vm_prot_t fault_type = fault_typea; 4531 vm_object_t eobject; 4532 vm_size_t size; 4533 struct ucred *cred; 4534 4535 RetryLookup: 4536 4537 vm_map_lock_read(map); 4538 4539 RetryLookupLocked: 4540 /* 4541 * Lookup the faulting address. 4542 */ 4543 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 4544 vm_map_unlock_read(map); 4545 return (KERN_INVALID_ADDRESS); 4546 } 4547 4548 entry = *out_entry; 4549 4550 /* 4551 * Handle submaps. 4552 */ 4553 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4554 vm_map_t old_map = map; 4555 4556 *var_map = map = entry->object.sub_map; 4557 vm_map_unlock_read(old_map); 4558 goto RetryLookup; 4559 } 4560 4561 /* 4562 * Check whether this task is allowed to have this page. 
4563 */ 4564 prot = entry->protection; 4565 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 4566 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 4567 if (prot == VM_PROT_NONE && map != kernel_map && 4568 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 4569 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4570 MAP_ENTRY_STACK_GAP_UP)) != 0 && 4571 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 4572 goto RetryLookupLocked; 4573 } 4574 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4575 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 4576 vm_map_unlock_read(map); 4577 return (KERN_PROTECTION_FAILURE); 4578 } 4579 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 4580 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 4581 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 4582 ("entry %p flags %x", entry, entry->eflags)); 4583 if ((fault_typea & VM_PROT_COPY) != 0 && 4584 (entry->max_protection & VM_PROT_WRITE) == 0 && 4585 (entry->eflags & MAP_ENTRY_COW) == 0) { 4586 vm_map_unlock_read(map); 4587 return (KERN_PROTECTION_FAILURE); 4588 } 4589 4590 /* 4591 * If this page is not pageable, we have to get it for all possible 4592 * accesses. 4593 */ 4594 *wired = (entry->wired_count != 0); 4595 if (*wired) 4596 fault_type = entry->protection; 4597 size = entry->end - entry->start; 4598 /* 4599 * If the entry was copy-on-write, we either ... 4600 */ 4601 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4602 /* 4603 * If we want to write the page, we may as well handle that 4604 * now since we've got the map locked. 4605 * 4606 * If we don't need to write the page, we just demote the 4607 * permissions allowed. 4608 */ 4609 if ((fault_type & VM_PROT_WRITE) != 0 || 4610 (fault_typea & VM_PROT_COPY) != 0) { 4611 /* 4612 * Make a new object, and place it in the object 4613 * chain. Note that no new references have appeared 4614 * -- one just moved from the map to the new 4615 * object. 4616 */ 4617 if (vm_map_lock_upgrade(map)) 4618 goto RetryLookup; 4619 4620 if (entry->cred == NULL) { 4621 /* 4622 * The debugger owner is charged for 4623 * the memory. 4624 */ 4625 cred = curthread->td_ucred; 4626 crhold(cred); 4627 if (!swap_reserve_by_cred(size, cred)) { 4628 crfree(cred); 4629 vm_map_unlock(map); 4630 return (KERN_RESOURCE_SHORTAGE); 4631 } 4632 entry->cred = cred; 4633 } 4634 vm_object_shadow(&entry->object.vm_object, 4635 &entry->offset, size); 4636 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4637 eobject = entry->object.vm_object; 4638 if (eobject->cred != NULL) { 4639 /* 4640 * The object was not shadowed. 4641 */ 4642 swap_release_by_cred(size, entry->cred); 4643 crfree(entry->cred); 4644 entry->cred = NULL; 4645 } else if (entry->cred != NULL) { 4646 VM_OBJECT_WLOCK(eobject); 4647 eobject->cred = entry->cred; 4648 eobject->charge = size; 4649 VM_OBJECT_WUNLOCK(eobject); 4650 entry->cred = NULL; 4651 } 4652 4653 vm_map_lock_downgrade(map); 4654 } else { 4655 /* 4656 * We're attempting to read a copy-on-write page -- 4657 * don't allow writes. 4658 */ 4659 prot &= ~VM_PROT_WRITE; 4660 } 4661 } 4662 4663 /* 4664 * Create an object if necessary. 
4665 */ 4666 if (entry->object.vm_object == NULL && 4667 !map->system_map) { 4668 if (vm_map_lock_upgrade(map)) 4669 goto RetryLookup; 4670 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 4671 atop(size)); 4672 entry->offset = 0; 4673 if (entry->cred != NULL) { 4674 VM_OBJECT_WLOCK(entry->object.vm_object); 4675 entry->object.vm_object->cred = entry->cred; 4676 entry->object.vm_object->charge = size; 4677 VM_OBJECT_WUNLOCK(entry->object.vm_object); 4678 entry->cred = NULL; 4679 } 4680 vm_map_lock_downgrade(map); 4681 } 4682 4683 /* 4684 * Return the object/offset from this entry. If the entry was 4685 * copy-on-write or empty, it has been fixed up. 4686 */ 4687 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4688 *object = entry->object.vm_object; 4689 4690 *out_prot = prot; 4691 return (KERN_SUCCESS); 4692 } 4693 4694 /* 4695 * vm_map_lookup_locked: 4696 * 4697 * Lookup the faulting address. A version of vm_map_lookup that returns 4698 * KERN_FAILURE instead of blocking on map lock or memory allocation. 4699 */ 4700 int 4701 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 4702 vm_offset_t vaddr, 4703 vm_prot_t fault_typea, 4704 vm_map_entry_t *out_entry, /* OUT */ 4705 vm_object_t *object, /* OUT */ 4706 vm_pindex_t *pindex, /* OUT */ 4707 vm_prot_t *out_prot, /* OUT */ 4708 boolean_t *wired) /* OUT */ 4709 { 4710 vm_map_entry_t entry; 4711 vm_map_t map = *var_map; 4712 vm_prot_t prot; 4713 vm_prot_t fault_type = fault_typea; 4714 4715 /* 4716 * Lookup the faulting address. 4717 */ 4718 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 4719 return (KERN_INVALID_ADDRESS); 4720 4721 entry = *out_entry; 4722 4723 /* 4724 * Fail if the entry refers to a submap. 4725 */ 4726 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 4727 return (KERN_FAILURE); 4728 4729 /* 4730 * Check whether this task is allowed to have this page. 4731 */ 4732 prot = entry->protection; 4733 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4734 if ((fault_type & prot) != fault_type) 4735 return (KERN_PROTECTION_FAILURE); 4736 4737 /* 4738 * If this page is not pageable, we have to get it for all possible 4739 * accesses. 4740 */ 4741 *wired = (entry->wired_count != 0); 4742 if (*wired) 4743 fault_type = entry->protection; 4744 4745 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4746 /* 4747 * Fail if the entry was copy-on-write for a write fault. 4748 */ 4749 if (fault_type & VM_PROT_WRITE) 4750 return (KERN_FAILURE); 4751 /* 4752 * We're attempting to read a copy-on-write page -- 4753 * don't allow writes. 4754 */ 4755 prot &= ~VM_PROT_WRITE; 4756 } 4757 4758 /* 4759 * Fail if an object should be created. 4760 */ 4761 if (entry->object.vm_object == NULL && !map->system_map) 4762 return (KERN_FAILURE); 4763 4764 /* 4765 * Return the object/offset from this entry. If the entry was 4766 * copy-on-write or empty, it has been fixed up. 4767 */ 4768 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4769 *object = entry->object.vm_object; 4770 4771 *out_prot = prot; 4772 return (KERN_SUCCESS); 4773 } 4774 4775 /* 4776 * vm_map_lookup_done: 4777 * 4778 * Releases locks acquired by a vm_map_lookup 4779 * (according to the handle returned by that lookup). 
4780 */ 4781 void 4782 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4783 { 4784 /* 4785 * Unlock the main-level map 4786 */ 4787 vm_map_unlock_read(map); 4788 } 4789 4790 vm_offset_t 4791 vm_map_max_KBI(const struct vm_map *map) 4792 { 4793 4794 return (vm_map_max(map)); 4795 } 4796 4797 vm_offset_t 4798 vm_map_min_KBI(const struct vm_map *map) 4799 { 4800 4801 return (vm_map_min(map)); 4802 } 4803 4804 pmap_t 4805 vm_map_pmap_KBI(vm_map_t map) 4806 { 4807 4808 return (map->pmap); 4809 } 4810 4811 #ifdef INVARIANTS 4812 static void 4813 _vm_map_assert_consistent(vm_map_t map, int check) 4814 { 4815 vm_map_entry_t entry, prev; 4816 vm_size_t max_left, max_right; 4817 4818 if (enable_vmmap_check != check) 4819 return; 4820 4821 prev = &map->header; 4822 VM_MAP_ENTRY_FOREACH(entry, map) { 4823 KASSERT(prev->end <= entry->start, 4824 ("map %p prev->end = %jx, start = %jx", map, 4825 (uintmax_t)prev->end, (uintmax_t)entry->start)); 4826 KASSERT(entry->start < entry->end, 4827 ("map %p start = %jx, end = %jx", map, 4828 (uintmax_t)entry->start, (uintmax_t)entry->end)); 4829 KASSERT(entry->end <= entry->next->start, 4830 ("map %p end = %jx, next->start = %jx", map, 4831 (uintmax_t)entry->end, (uintmax_t)entry->next->start)); 4832 KASSERT(entry->left == NULL || 4833 entry->left->start < entry->start, 4834 ("map %p left->start = %jx, start = %jx", map, 4835 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); 4836 KASSERT(entry->right == NULL || 4837 entry->start < entry->right->start, 4838 ("map %p start = %jx, right->start = %jx", map, 4839 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); 4840 max_left = vm_map_entry_max_free_left(entry, entry->prev); 4841 max_right = vm_map_entry_max_free_right(entry, entry->next); 4842 KASSERT(entry->max_free == MAX(max_left, max_right), 4843 ("map %p max = %jx, max_left = %jx, max_right = %jx", map, 4844 (uintmax_t)entry->max_free, 4845 (uintmax_t)max_left, (uintmax_t)max_right)); 4846 prev = entry; 4847 } 4848 KASSERT(prev->end <= entry->start, 4849 ("map %p prev->end = %jx, start = %jx", map, 4850 (uintmax_t)prev->end, (uintmax_t)entry->start)); 4851 } 4852 #endif 4853 4854 #include "opt_ddb.h" 4855 #ifdef DDB 4856 #include <sys/kernel.h> 4857 4858 #include <ddb/ddb.h> 4859 4860 static void 4861 vm_map_print(vm_map_t map) 4862 { 4863 vm_map_entry_t entry, prev; 4864 4865 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4866 (void *)map, 4867 (void *)map->pmap, map->nentries, map->timestamp); 4868 4869 db_indent += 2; 4870 prev = &map->header; 4871 VM_MAP_ENTRY_FOREACH(entry, map) { 4872 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 4873 (void *)entry, (void *)entry->start, (void *)entry->end, 4874 entry->eflags); 4875 { 4876 static char *inheritance_name[4] = 4877 {"share", "copy", "none", "donate_copy"}; 4878 4879 db_iprintf(" prot=%x/%x/%s", 4880 entry->protection, 4881 entry->max_protection, 4882 inheritance_name[(int)(unsigned char) 4883 entry->inheritance]); 4884 if (entry->wired_count != 0) 4885 db_printf(", wired"); 4886 } 4887 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4888 db_printf(", share=%p, offset=0x%jx\n", 4889 (void *)entry->object.sub_map, 4890 (uintmax_t)entry->offset); 4891 if (prev == &map->header || 4892 prev->object.sub_map != 4893 entry->object.sub_map) { 4894 db_indent += 2; 4895 vm_map_print((vm_map_t)entry->object.sub_map); 4896 db_indent -= 2; 4897 } 4898 } else { 4899 if (entry->cred != NULL) 4900 db_printf(", ruid %d", entry->cred->cr_ruid); 4901 db_printf(", object=%p, 
static void
vm_map_print(vm_map_t map)
{
	vm_map_entry_t entry, prev;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);

	db_indent += 2;
	prev = &map->header;
	VM_MAP_ENTRY_FOREACH(entry, map) {
		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
		    (void *)entry, (void *)entry->start, (void *)entry->end,
		    entry->eflags);
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)
			    entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			db_printf(", share=%p, offset=0x%jx\n",
			    (void *)entry->object.sub_map,
			    (uintmax_t)entry->offset);
			if (prev == &map->header ||
			    prev->object.sub_map !=
			    entry->object.sub_map) {
				db_indent += 2;
				vm_map_print((vm_map_t)entry->object.sub_map);
				db_indent -= 2;
			}
		} else {
			if (entry->cred != NULL)
				db_printf(", ruid %d", entry->cred->cr_ruid);
			db_printf(", object=%p, offset=0x%jx",
			    (void *)entry->object.vm_object,
			    (uintmax_t)entry->offset);
			if (entry->object.vm_object && entry->object.vm_object->cred)
				db_printf(", obj ruid %d charge %jx",
				    entry->object.vm_object->cred->cr_ruid,
				    (uintmax_t)entry->object.vm_object->charge);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");

			if (prev == &map->header ||
			    prev->object.vm_object !=
			    entry->object.vm_object) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    0, 0, (char *)0);
				db_indent -= 2;
			}
		}
		prev = entry;
	}
	db_indent -= 2;
}

DB_SHOW_COMMAND(map, map)
{

	if (!have_addr) {
		db_printf("usage: show map <addr>\n");
		return;
	}
	vm_map_print((vm_map_t)addr);
}

DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = db_lookup_proc(addr);
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}

#endif /* DDB */