1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 39 * 40 * Permission to use, copy, modify and distribute this software and 41 * its documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49 * 50 * Carnegie Mellon requests users of this software to return to 51 * 52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53 * School of Computer Science 54 * Carnegie Mellon University 55 * Pittsburgh PA 15213-3890 56 * 57 * any improvements or extensions that they make and grant Carnegie the 58 * rights to redistribute these changes. 59 */ 60 61 /* 62 * Virtual memory mapping module. 
63 */ 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/elf.h> 68 #include <sys/kernel.h> 69 #include <sys/ktr.h> 70 #include <sys/lock.h> 71 #include <sys/mutex.h> 72 #include <sys/proc.h> 73 #include <sys/vmmeter.h> 74 #include <sys/mman.h> 75 #include <sys/vnode.h> 76 #include <sys/racct.h> 77 #include <sys/resourcevar.h> 78 #include <sys/rwlock.h> 79 #include <sys/file.h> 80 #include <sys/sysctl.h> 81 #include <sys/sysent.h> 82 #include <sys/shm.h> 83 84 #include <vm/vm.h> 85 #include <vm/vm_param.h> 86 #include <vm/pmap.h> 87 #include <vm/vm_map.h> 88 #include <vm/vm_page.h> 89 #include <vm/vm_pageout.h> 90 #include <vm/vm_object.h> 91 #include <vm/vm_pager.h> 92 #include <vm/vm_kern.h> 93 #include <vm/vm_extern.h> 94 #include <vm/vnode_pager.h> 95 #include <vm/swap_pager.h> 96 #include <vm/uma.h> 97 98 /* 99 * Virtual memory maps provide for the mapping, protection, 100 * and sharing of virtual memory objects. In addition, 101 * this module provides for an efficient virtual copy of 102 * memory from one map to another. 103 * 104 * Synchronization is required prior to most operations. 105 * 106 * Maps consist of an ordered doubly-linked list of simple 107 * entries; a self-adjusting binary search tree of these 108 * entries is used to speed up lookups. 109 * 110 * Since portions of maps are specified by start/end addresses, 111 * which may not align with existing map entries, all 112 * routines merely "clip" entries to these start/end values. 113 * [That is, an entry is split into two, bordering at a 114 * start or end value.] Note that these clippings may not 115 * always be necessary (as the two resulting entries are then 116 * not changed); however, the clipping is done for convenience. 117 * 118 * As mentioned above, virtual copy operations are performed 119 * by copying VM object references from one map to 120 * another, and then marking both regions as copy-on-write. 121 */ 122 123 static struct mtx map_sleep_mtx; 124 static uma_zone_t mapentzone; 125 static uma_zone_t kmapentzone; 126 static uma_zone_t vmspace_zone; 127 static int vmspace_zinit(void *mem, int size, int flags); 128 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, 129 vm_offset_t max); 130 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); 131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); 132 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); 133 static int vm_map_growstack(vm_map_t map, vm_offset_t addr, 134 vm_map_entry_t gap_entry); 135 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 136 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); 137 #ifdef INVARIANTS 138 static void vmspace_zdtor(void *mem, int size, void *arg); 139 #endif 140 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, 141 vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, 142 int cow); 143 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 144 vm_offset_t failed_addr); 145 146 #define CONTAINS_BITS(set, bits) ((~(set) & (bits)) == 0) 147 148 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \ 149 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \ 150 !((e)->eflags & MAP_ENTRY_NEEDS_COPY))) 151 152 /* 153 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type 154 * stable. 
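 *
 * Type stability comes from the vmspace UMA zone being created with
 * UMA_ZONE_NOFREE (see vm_map_startup() below): memory that has held a
 * vmspace is never returned to the system, so a stale pointer read under
 * these macros cannot reference an unmapped page.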
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

#ifndef UMA_USE_DMAP

/*
 * Allocate a new slab for kernel map entries.  The kernel map may be locked or
 * unlocked, depending on whether the request is coming from the kernel map or a
 * submap.  This function allocates a virtual address range directly from the
 * kernel map instead of the kmem_* layer to avoid recursion on the kernel map
 * lock and also to avoid triggering allocator recursion in the vmem boundary
 * tag allocator.
 */
static void *
kmapent_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_offset_t addr;
	int error, locked;

	*pflag = UMA_SLAB_PRIV;

	if (!(locked = vm_map_locked(kernel_map)))
		vm_map_lock(kernel_map);
	addr = vm_map_findspace(kernel_map, vm_map_min(kernel_map), bytes);
	if (addr + bytes < addr || addr + bytes > vm_map_max(kernel_map))
		panic("%s: kernel map is exhausted", __func__);
	error = vm_map_insert(kernel_map, NULL, 0, addr, addr + bytes,
	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
	if (error != KERN_SUCCESS)
		panic("%s: vm_map_insert() failed: %d", __func__, error);
	if (!locked)
		vm_map_unlock(kernel_map);
	error = kmem_back_domain(domain, kernel_object, addr, bytes, M_NOWAIT |
	    M_USE_RESERVE | (wait & M_ZERO));
	if (error == KERN_SUCCESS) {
		return ((void *)addr);
	} else {
		if (!locked)
			vm_map_lock(kernel_map);
		vm_map_delete(kernel_map, addr, addr + bytes);
		if (!locked)
			vm_map_unlock(kernel_map);
		return (NULL);
	}
}

static void
kmapent_free(void *item, vm_size_t size, uint8_t pflag)
{
	vm_offset_t addr;
	int error __diagused;

	if ((pflag & UMA_SLAB_PRIV) == 0)
		/* XXX leaked */
		return;

	addr = (vm_offset_t)item;
	kmem_unback(kernel_object, addr, size);
	error = vm_map_remove(kernel_map, addr, addr + size);
	KASSERT(error == KERN_SUCCESS,
	    ("%s: vm_map_remove failed: %d", __func__, error));
}

/*
 * The worst-case upper bound on the number of kernel map entries that may be
 * created before the zone must be replenished in _vm_map_unlock().
 */
#define	KMAPENT_RESERVE	1

#endif /* !UMA_USE_DMAP */

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before any other vm_map
 *	routines.
 *
 *	User map and entry structures are allocated from the general purpose
 *	memory pool.  Kernel maps are statically defined.  Kernel map entries
 *	require special handling to avoid recursion; see the comments above
 *	kmapent_alloc() and in vm_map_entry_create().
 */
void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);

	/*
	 * Disable the use of per-CPU buckets: map entry allocation is
	 * serialized by the kernel map lock.
263 */ 264 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 265 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 266 UMA_ZONE_VM | UMA_ZONE_NOBUCKET); 267 #ifndef UMA_USE_DMAP 268 /* Reserve an extra map entry for use when replenishing the reserve. */ 269 uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1); 270 uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1); 271 uma_zone_set_allocf(kmapentzone, kmapent_alloc); 272 uma_zone_set_freef(kmapentzone, kmapent_free); 273 #endif 274 275 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 276 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 277 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 278 #ifdef INVARIANTS 279 vmspace_zdtor, 280 #else 281 NULL, 282 #endif 283 vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 284 } 285 286 static int 287 vmspace_zinit(void *mem, int size, int flags) 288 { 289 struct vmspace *vm; 290 vm_map_t map; 291 292 vm = (struct vmspace *)mem; 293 map = &vm->vm_map; 294 295 memset(map, 0, sizeof(*map)); 296 mtx_init(&map->system_mtx, "vm map (system)", NULL, 297 MTX_DEF | MTX_DUPOK); 298 sx_init(&map->lock, "vm map (user)"); 299 PMAP_LOCK_INIT(vmspace_pmap(vm)); 300 return (0); 301 } 302 303 #ifdef INVARIANTS 304 static void 305 vmspace_zdtor(void *mem, int size, void *arg) 306 { 307 struct vmspace *vm; 308 309 vm = (struct vmspace *)mem; 310 KASSERT(vm->vm_map.nentries == 0, 311 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); 312 KASSERT(vm->vm_map.size == 0, 313 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); 314 } 315 #endif /* INVARIANTS */ 316 317 /* 318 * Allocate a vmspace structure, including a vm_map and pmap, 319 * and initialize those structures. The refcnt is set to 1. 320 */ 321 struct vmspace * 322 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) 323 { 324 struct vmspace *vm; 325 326 vm = uma_zalloc(vmspace_zone, M_WAITOK); 327 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); 328 if (!pinit(vmspace_pmap(vm))) { 329 uma_zfree(vmspace_zone, vm); 330 return (NULL); 331 } 332 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 333 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); 334 refcount_init(&vm->vm_refcnt, 1); 335 vm->vm_shm = NULL; 336 vm->vm_swrss = 0; 337 vm->vm_tsize = 0; 338 vm->vm_dsize = 0; 339 vm->vm_ssize = 0; 340 vm->vm_taddr = 0; 341 vm->vm_daddr = 0; 342 vm->vm_maxsaddr = 0; 343 return (vm); 344 } 345 346 #ifdef RACCT 347 static void 348 vmspace_container_reset(struct proc *p) 349 { 350 351 PROC_LOCK(p); 352 racct_set(p, RACCT_DATA, 0); 353 racct_set(p, RACCT_STACK, 0); 354 racct_set(p, RACCT_RSS, 0); 355 racct_set(p, RACCT_MEMLOCK, 0); 356 racct_set(p, RACCT_VMEM, 0); 357 PROC_UNLOCK(p); 358 } 359 #endif 360 361 static inline void 362 vmspace_dofree(struct vmspace *vm) 363 { 364 365 CTR1(KTR_VM, "vmspace_free: %p", vm); 366 367 /* 368 * Make sure any SysV shm is freed, it might not have been in 369 * exit1(). 370 */ 371 shmexit(vm); 372 373 /* 374 * Lock the map, to wait out all other references to it. 375 * Delete all of the mappings and pages they hold, then call 376 * the pmap module to reclaim anything left. 
377 */ 378 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), 379 vm_map_max(&vm->vm_map)); 380 381 pmap_release(vmspace_pmap(vm)); 382 vm->vm_map.pmap = NULL; 383 uma_zfree(vmspace_zone, vm); 384 } 385 386 void 387 vmspace_free(struct vmspace *vm) 388 { 389 390 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 391 "vmspace_free() called"); 392 393 if (refcount_release(&vm->vm_refcnt)) 394 vmspace_dofree(vm); 395 } 396 397 void 398 vmspace_exitfree(struct proc *p) 399 { 400 struct vmspace *vm; 401 402 PROC_VMSPACE_LOCK(p); 403 vm = p->p_vmspace; 404 p->p_vmspace = NULL; 405 PROC_VMSPACE_UNLOCK(p); 406 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 407 vmspace_free(vm); 408 } 409 410 void 411 vmspace_exit(struct thread *td) 412 { 413 struct vmspace *vm; 414 struct proc *p; 415 bool released; 416 417 p = td->td_proc; 418 vm = p->p_vmspace; 419 420 /* 421 * Prepare to release the vmspace reference. The thread that releases 422 * the last reference is responsible for tearing down the vmspace. 423 * However, threads not releasing the final reference must switch to the 424 * kernel's vmspace0 before the decrement so that the subsequent pmap 425 * deactivation does not modify a freed vmspace. 426 */ 427 refcount_acquire(&vmspace0.vm_refcnt); 428 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { 429 if (p->p_vmspace != &vmspace0) { 430 PROC_VMSPACE_LOCK(p); 431 p->p_vmspace = &vmspace0; 432 PROC_VMSPACE_UNLOCK(p); 433 pmap_activate(td); 434 } 435 released = refcount_release(&vm->vm_refcnt); 436 } 437 if (released) { 438 /* 439 * pmap_remove_pages() expects the pmap to be active, so switch 440 * back first if necessary. 441 */ 442 if (p->p_vmspace != vm) { 443 PROC_VMSPACE_LOCK(p); 444 p->p_vmspace = vm; 445 PROC_VMSPACE_UNLOCK(p); 446 pmap_activate(td); 447 } 448 pmap_remove_pages(vmspace_pmap(vm)); 449 PROC_VMSPACE_LOCK(p); 450 p->p_vmspace = &vmspace0; 451 PROC_VMSPACE_UNLOCK(p); 452 pmap_activate(td); 453 vmspace_dofree(vm); 454 } 455 #ifdef RACCT 456 if (racct_enable) 457 vmspace_container_reset(p); 458 #endif 459 } 460 461 /* Acquire reference to vmspace owned by another process. */ 462 463 struct vmspace * 464 vmspace_acquire_ref(struct proc *p) 465 { 466 struct vmspace *vm; 467 468 PROC_VMSPACE_LOCK(p); 469 vm = p->p_vmspace; 470 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) { 471 PROC_VMSPACE_UNLOCK(p); 472 return (NULL); 473 } 474 if (vm != p->p_vmspace) { 475 PROC_VMSPACE_UNLOCK(p); 476 vmspace_free(vm); 477 return (NULL); 478 } 479 PROC_VMSPACE_UNLOCK(p); 480 return (vm); 481 } 482 483 /* 484 * Switch between vmspaces in an AIO kernel process. 485 * 486 * The new vmspace is either the vmspace of a user process obtained 487 * from an active AIO request or the initial vmspace of the AIO kernel 488 * process (when it is idling). Because user processes will block to 489 * drain any active AIO requests before proceeding in exit() or 490 * execve(), the reference count for vmspaces from AIO requests can 491 * never be 0. Similarly, AIO kernel processes hold an extra 492 * reference on their initial vmspace for the life of the process. As 493 * a result, the 'newvm' vmspace always has a non-zero reference 494 * count. This permits an additional reference on 'newvm' to be 495 * acquired via a simple atomic increment rather than the loop in 496 * vmspace_acquire_ref() above. 497 */ 498 void 499 vmspace_switch_aio(struct vmspace *newvm) 500 { 501 struct vmspace *oldvm; 502 503 /* XXX: Need some way to assert that this is an aio daemon. 
 */

	KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	refcount_acquire(&newvm->vm_refcnt);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object;
	struct vnode *vp;
	bool vp_held;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	if ((object->flags & OBJ_ANON) != 0)
		object = object->handle;
	else
		KASSERT(object->backing_object == NULL,
		    ("non-anon object %p shadows", object));
	KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
	    entry, entry->object.vm_object));

	/*
	 * Mostly, we do not lock the backing object.  It is
	 * referenced by the entry we are processing, so it cannot go
	 * away.
	 */
	vm_pager_getvp(object, &vp, &vp_held);
	if (vp != NULL) {
		if (add) {
			VOP_SET_TEXT_CHECKED(vp);
		} else {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			VOP_UNSET_TEXT_CHECKED(vp);
			VOP_UNLOCK(vp);
		}
		if (vp_held)
			vdrop(vp);
	}
}

/*
 * Use a different name for this vm_map_entry field when it is used in a
 * way that is not consistent with its use as part of an ordered search
 * tree.
 */
#define defer_next right

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->defer_next;
		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
600 */ 601 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 602 ("Submap with writecount")); 603 object = entry->object.vm_object; 604 KASSERT(object != NULL, ("No object for writecount")); 605 vm_pager_release_writecount(object, entry->start, 606 entry->end); 607 } 608 vm_map_entry_set_vnode_text(entry, false); 609 vm_map_entry_deallocate(entry, FALSE); 610 entry = next; 611 } 612 } 613 614 #ifdef INVARIANTS 615 static void 616 _vm_map_assert_locked(vm_map_t map, const char *file, int line) 617 { 618 619 if (map->system_map) 620 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 621 else 622 sx_assert_(&map->lock, SA_XLOCKED, file, line); 623 } 624 625 #define VM_MAP_ASSERT_LOCKED(map) \ 626 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) 627 628 enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL }; 629 #ifdef DIAGNOSTIC 630 static int enable_vmmap_check = VMMAP_CHECK_UNLOCK; 631 #else 632 static int enable_vmmap_check = VMMAP_CHECK_NONE; 633 #endif 634 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN, 635 &enable_vmmap_check, 0, "Enable vm map consistency checking"); 636 637 static void _vm_map_assert_consistent(vm_map_t map, int check); 638 639 #define VM_MAP_ASSERT_CONSISTENT(map) \ 640 _vm_map_assert_consistent(map, VMMAP_CHECK_ALL) 641 #ifdef DIAGNOSTIC 642 #define VM_MAP_UNLOCK_CONSISTENT(map) do { \ 643 if (map->nupdates > map->nentries) { \ 644 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \ 645 map->nupdates = 0; \ 646 } \ 647 } while (0) 648 #else 649 #define VM_MAP_UNLOCK_CONSISTENT(map) 650 #endif 651 #else 652 #define VM_MAP_ASSERT_LOCKED(map) 653 #define VM_MAP_ASSERT_CONSISTENT(map) 654 #define VM_MAP_UNLOCK_CONSISTENT(map) 655 #endif /* INVARIANTS */ 656 657 void 658 _vm_map_unlock(vm_map_t map, const char *file, int line) 659 { 660 661 VM_MAP_UNLOCK_CONSISTENT(map); 662 if (map->system_map) { 663 #ifndef UMA_USE_DMAP 664 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) { 665 uma_prealloc(kmapentzone, 1); 666 map->flags &= ~MAP_REPLENISH; 667 } 668 #endif 669 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 670 } else { 671 sx_xunlock_(&map->lock, file, line); 672 vm_map_process_deferred(); 673 } 674 } 675 676 void 677 _vm_map_lock_read(vm_map_t map, const char *file, int line) 678 { 679 680 if (map->system_map) 681 mtx_lock_flags_(&map->system_mtx, 0, file, line); 682 else 683 sx_slock_(&map->lock, file, line); 684 } 685 686 void 687 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 688 { 689 690 if (map->system_map) { 691 KASSERT((map->flags & MAP_REPLENISH) == 0, 692 ("%s: MAP_REPLENISH leaked", __func__)); 693 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 694 } else { 695 sx_sunlock_(&map->lock, file, line); 696 vm_map_process_deferred(); 697 } 698 } 699 700 int 701 _vm_map_trylock(vm_map_t map, const char *file, int line) 702 { 703 int error; 704 705 error = map->system_map ? 706 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 707 !sx_try_xlock_(&map->lock, file, line); 708 if (error == 0) 709 map->timestamp++; 710 return (error == 0); 711 } 712 713 int 714 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 715 { 716 int error; 717 718 error = map->system_map ? 719 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 720 !sx_try_slock_(&map->lock, file, line); 721 return (error == 0); 722 } 723 724 /* 725 * _vm_map_lock_upgrade: [ internal use only ] 726 * 727 * Tries to upgrade a read (shared) lock on the specified map to a write 728 * (exclusive) lock. 
Returns the value "0" if the upgrade succeeds and a 729 * non-zero value if the upgrade fails. If the upgrade fails, the map is 730 * returned without a read or write lock held. 731 * 732 * Requires that the map be read locked. 733 */ 734 int 735 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 736 { 737 unsigned int last_timestamp; 738 739 if (map->system_map) { 740 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 741 } else { 742 if (!sx_try_upgrade_(&map->lock, file, line)) { 743 last_timestamp = map->timestamp; 744 sx_sunlock_(&map->lock, file, line); 745 vm_map_process_deferred(); 746 /* 747 * If the map's timestamp does not change while the 748 * map is unlocked, then the upgrade succeeds. 749 */ 750 sx_xlock_(&map->lock, file, line); 751 if (last_timestamp != map->timestamp) { 752 sx_xunlock_(&map->lock, file, line); 753 return (1); 754 } 755 } 756 } 757 map->timestamp++; 758 return (0); 759 } 760 761 void 762 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 763 { 764 765 if (map->system_map) { 766 KASSERT((map->flags & MAP_REPLENISH) == 0, 767 ("%s: MAP_REPLENISH leaked", __func__)); 768 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 769 } else { 770 VM_MAP_UNLOCK_CONSISTENT(map); 771 sx_downgrade_(&map->lock, file, line); 772 } 773 } 774 775 /* 776 * vm_map_locked: 777 * 778 * Returns a non-zero value if the caller holds a write (exclusive) lock 779 * on the specified map and the value "0" otherwise. 780 */ 781 int 782 vm_map_locked(vm_map_t map) 783 { 784 785 if (map->system_map) 786 return (mtx_owned(&map->system_mtx)); 787 else 788 return (sx_xlocked(&map->lock)); 789 } 790 791 /* 792 * _vm_map_unlock_and_wait: 793 * 794 * Atomically releases the lock on the specified map and puts the calling 795 * thread to sleep. The calling thread will remain asleep until either 796 * vm_map_wakeup() is performed on the map or the specified timeout is 797 * exceeded. 798 * 799 * WARNING! This function does not perform deferred deallocations of 800 * objects and map entries. Therefore, the calling thread is expected to 801 * reacquire the map lock after reawakening and later perform an ordinary 802 * unlock operation, such as vm_map_unlock(), before completing its 803 * operation on the map. 804 */ 805 int 806 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) 807 { 808 809 VM_MAP_UNLOCK_CONSISTENT(map); 810 mtx_lock(&map_sleep_mtx); 811 if (map->system_map) { 812 KASSERT((map->flags & MAP_REPLENISH) == 0, 813 ("%s: MAP_REPLENISH leaked", __func__)); 814 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 815 } else { 816 sx_xunlock_(&map->lock, file, line); 817 } 818 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 819 timo)); 820 } 821 822 /* 823 * vm_map_wakeup: 824 * 825 * Awaken any threads that have slept on the map using 826 * vm_map_unlock_and_wait(). 827 */ 828 void 829 vm_map_wakeup(vm_map_t map) 830 { 831 832 /* 833 * Acquire and release map_sleep_mtx to prevent a wakeup() 834 * from being performed (and lost) between the map unlock 835 * and the msleep() in _vm_map_unlock_and_wait(). 
836 */ 837 mtx_lock(&map_sleep_mtx); 838 mtx_unlock(&map_sleep_mtx); 839 wakeup(&map->root); 840 } 841 842 void 843 vm_map_busy(vm_map_t map) 844 { 845 846 VM_MAP_ASSERT_LOCKED(map); 847 map->busy++; 848 } 849 850 void 851 vm_map_unbusy(vm_map_t map) 852 { 853 854 VM_MAP_ASSERT_LOCKED(map); 855 KASSERT(map->busy, ("vm_map_unbusy: not busy")); 856 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { 857 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); 858 wakeup(&map->busy); 859 } 860 } 861 862 void 863 vm_map_wait_busy(vm_map_t map) 864 { 865 866 VM_MAP_ASSERT_LOCKED(map); 867 while (map->busy) { 868 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); 869 if (map->system_map) 870 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); 871 else 872 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); 873 } 874 map->timestamp++; 875 } 876 877 long 878 vmspace_resident_count(struct vmspace *vmspace) 879 { 880 return pmap_resident_count(vmspace_pmap(vmspace)); 881 } 882 883 /* 884 * Initialize an existing vm_map structure 885 * such as that in the vmspace structure. 886 */ 887 static void 888 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 889 { 890 891 map->header.eflags = MAP_ENTRY_HEADER; 892 map->needs_wakeup = FALSE; 893 map->system_map = 0; 894 map->pmap = pmap; 895 map->header.end = min; 896 map->header.start = max; 897 map->flags = 0; 898 map->header.left = map->header.right = &map->header; 899 map->root = NULL; 900 map->timestamp = 0; 901 map->busy = 0; 902 map->anon_loc = 0; 903 #ifdef DIAGNOSTIC 904 map->nupdates = 0; 905 #endif 906 } 907 908 void 909 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 910 { 911 912 _vm_map_init(map, pmap, min, max); 913 mtx_init(&map->system_mtx, "vm map (system)", NULL, 914 MTX_DEF | MTX_DUPOK); 915 sx_init(&map->lock, "vm map (user)"); 916 } 917 918 /* 919 * vm_map_entry_dispose: [ internal use only ] 920 * 921 * Inverse of vm_map_entry_create. 922 */ 923 static void 924 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 925 { 926 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 927 } 928 929 /* 930 * vm_map_entry_create: [ internal use only ] 931 * 932 * Allocates a VM map entry for insertion. 933 * No entry fields are filled in. 934 */ 935 static vm_map_entry_t 936 vm_map_entry_create(vm_map_t map) 937 { 938 vm_map_entry_t new_entry; 939 940 #ifndef UMA_USE_DMAP 941 if (map == kernel_map) { 942 VM_MAP_ASSERT_LOCKED(map); 943 944 /* 945 * A new slab of kernel map entries cannot be allocated at this 946 * point because the kernel map has not yet been updated to 947 * reflect the caller's request. Therefore, we allocate a new 948 * map entry, dipping into the reserve if necessary, and set a 949 * flag indicating that the reserve must be replenished before 950 * the map is unlocked. 951 */ 952 new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_NOVM); 953 if (new_entry == NULL) { 954 new_entry = uma_zalloc(kmapentzone, 955 M_NOWAIT | M_NOVM | M_USE_RESERVE); 956 kernel_map->flags |= MAP_REPLENISH; 957 } 958 } else 959 #endif 960 if (map->system_map) { 961 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 962 } else { 963 new_entry = uma_zalloc(mapentzone, M_WAITOK); 964 } 965 KASSERT(new_entry != NULL, 966 ("vm_map_entry_create: kernel resources exhausted")); 967 return (new_entry); 968 } 969 970 /* 971 * vm_map_entry_set_behavior: 972 * 973 * Set the expected access behavior, either normal, random, or 974 * sequential. 
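 *
 *	For example, vm_map_madvise() typically uses this to translate
 *	MADV_NORMAL, MADV_RANDOM, and MADV_SEQUENTIAL advice into
 *	MAP_ENTRY_BEHAV_NORMAL, MAP_ENTRY_BEHAV_RANDOM, and
 *	MAP_ENTRY_BEHAV_SEQUENTIAL on each entry in the advised range.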
975 */ 976 static inline void 977 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 978 { 979 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 980 (behavior & MAP_ENTRY_BEHAV_MASK); 981 } 982 983 /* 984 * vm_map_entry_max_free_{left,right}: 985 * 986 * Compute the size of the largest free gap between two entries, 987 * one the root of a tree and the other the ancestor of that root 988 * that is the least or greatest ancestor found on the search path. 989 */ 990 static inline vm_size_t 991 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor) 992 { 993 994 return (root->left != left_ancestor ? 995 root->left->max_free : root->start - left_ancestor->end); 996 } 997 998 static inline vm_size_t 999 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor) 1000 { 1001 1002 return (root->right != right_ancestor ? 1003 root->right->max_free : right_ancestor->start - root->end); 1004 } 1005 1006 /* 1007 * vm_map_entry_{pred,succ}: 1008 * 1009 * Find the {predecessor, successor} of the entry by taking one step 1010 * in the appropriate direction and backtracking as much as necessary. 1011 * vm_map_entry_succ is defined in vm_map.h. 1012 */ 1013 static inline vm_map_entry_t 1014 vm_map_entry_pred(vm_map_entry_t entry) 1015 { 1016 vm_map_entry_t prior; 1017 1018 prior = entry->left; 1019 if (prior->right->start < entry->start) { 1020 do 1021 prior = prior->right; 1022 while (prior->right != entry); 1023 } 1024 return (prior); 1025 } 1026 1027 static inline vm_size_t 1028 vm_size_max(vm_size_t a, vm_size_t b) 1029 { 1030 1031 return (a > b ? a : b); 1032 } 1033 1034 #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do { \ 1035 vm_map_entry_t z; \ 1036 vm_size_t max_free; \ 1037 \ 1038 /* \ 1039 * Infer root->right->max_free == root->max_free when \ 1040 * y->max_free < root->max_free || root->max_free == 0. \ 1041 * Otherwise, look right to find it. \ 1042 */ \ 1043 y = root->left; \ 1044 max_free = root->max_free; \ 1045 KASSERT(max_free == vm_size_max( \ 1046 vm_map_entry_max_free_left(root, llist), \ 1047 vm_map_entry_max_free_right(root, rlist)), \ 1048 ("%s: max_free invariant fails", __func__)); \ 1049 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \ 1050 max_free = vm_map_entry_max_free_right(root, rlist); \ 1051 if (y != llist && (test)) { \ 1052 /* Rotate right and make y root. */ \ 1053 z = y->right; \ 1054 if (z != root) { \ 1055 root->left = z; \ 1056 y->right = root; \ 1057 if (max_free < y->max_free) \ 1058 root->max_free = max_free = \ 1059 vm_size_max(max_free, z->max_free); \ 1060 } else if (max_free < y->max_free) \ 1061 root->max_free = max_free = \ 1062 vm_size_max(max_free, root->start - y->end);\ 1063 root = y; \ 1064 y = root->left; \ 1065 } \ 1066 /* Copy right->max_free. Put root on rlist. */ \ 1067 root->max_free = max_free; \ 1068 KASSERT(max_free == vm_map_entry_max_free_right(root, rlist), \ 1069 ("%s: max_free not copied from right", __func__)); \ 1070 root->left = rlist; \ 1071 rlist = root; \ 1072 root = y != llist ? y : NULL; \ 1073 } while (0) 1074 1075 #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do { \ 1076 vm_map_entry_t z; \ 1077 vm_size_t max_free; \ 1078 \ 1079 /* \ 1080 * Infer root->left->max_free == root->max_free when \ 1081 * y->max_free < root->max_free || root->max_free == 0. \ 1082 * Otherwise, look left to find it. 
\ 1083 */ \ 1084 y = root->right; \ 1085 max_free = root->max_free; \ 1086 KASSERT(max_free == vm_size_max( \ 1087 vm_map_entry_max_free_left(root, llist), \ 1088 vm_map_entry_max_free_right(root, rlist)), \ 1089 ("%s: max_free invariant fails", __func__)); \ 1090 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \ 1091 max_free = vm_map_entry_max_free_left(root, llist); \ 1092 if (y != rlist && (test)) { \ 1093 /* Rotate left and make y root. */ \ 1094 z = y->left; \ 1095 if (z != root) { \ 1096 root->right = z; \ 1097 y->left = root; \ 1098 if (max_free < y->max_free) \ 1099 root->max_free = max_free = \ 1100 vm_size_max(max_free, z->max_free); \ 1101 } else if (max_free < y->max_free) \ 1102 root->max_free = max_free = \ 1103 vm_size_max(max_free, y->start - root->end);\ 1104 root = y; \ 1105 y = root->right; \ 1106 } \ 1107 /* Copy left->max_free. Put root on llist. */ \ 1108 root->max_free = max_free; \ 1109 KASSERT(max_free == vm_map_entry_max_free_left(root, llist), \ 1110 ("%s: max_free not copied from left", __func__)); \ 1111 root->right = llist; \ 1112 llist = root; \ 1113 root = y != rlist ? y : NULL; \ 1114 } while (0) 1115 1116 /* 1117 * Walk down the tree until we find addr or a gap where addr would go, breaking 1118 * off left and right subtrees of nodes less than, or greater than addr. Treat 1119 * subtrees with root->max_free < length as empty trees. llist and rlist are 1120 * the two sides in reverse order (bottom-up), with llist linked by the right 1121 * pointer and rlist linked by the left pointer in the vm_map_entry, and both 1122 * lists terminated by &map->header. This function, and the subsequent call to 1123 * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address 1124 * values in &map->header. 1125 */ 1126 static __always_inline vm_map_entry_t 1127 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length, 1128 vm_map_entry_t *llist, vm_map_entry_t *rlist) 1129 { 1130 vm_map_entry_t left, right, root, y; 1131 1132 left = right = &map->header; 1133 root = map->root; 1134 while (root != NULL && root->max_free >= length) { 1135 KASSERT(left->end <= root->start && 1136 root->end <= right->start, 1137 ("%s: root not within tree bounds", __func__)); 1138 if (addr < root->start) { 1139 SPLAY_LEFT_STEP(root, y, left, right, 1140 y->max_free >= length && addr < y->start); 1141 } else if (addr >= root->end) { 1142 SPLAY_RIGHT_STEP(root, y, left, right, 1143 y->max_free >= length && addr >= y->end); 1144 } else 1145 break; 1146 } 1147 *llist = left; 1148 *rlist = right; 1149 return (root); 1150 } 1151 1152 static __always_inline void 1153 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist) 1154 { 1155 vm_map_entry_t hi, right, y; 1156 1157 right = *rlist; 1158 hi = root->right == right ? NULL : root->right; 1159 if (hi == NULL) 1160 return; 1161 do 1162 SPLAY_LEFT_STEP(hi, y, root, right, true); 1163 while (hi != NULL); 1164 *rlist = right; 1165 } 1166 1167 static __always_inline void 1168 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist) 1169 { 1170 vm_map_entry_t left, lo, y; 1171 1172 left = *llist; 1173 lo = root->left == left ? 
	    NULL : root->left;
	if (lo == NULL)
		return;
	do
		SPLAY_RIGHT_STEP(lo, y, left, root, true);
	while (lo != NULL);
	*llist = left;
}

static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
	vm_map_entry_t tmp;

	tmp = *b;
	*b = *a;
	*a = tmp;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static vm_size_t
vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
{
	do {
		/*
		 * The max_free values of the children of llist are in
		 * llist->max_free and max_free.  Update with the
		 * max value.
		 */
		llist->max_free = max_free =
		    vm_size_max(llist->max_free, max_free);
		vm_map_entry_swap(&llist->right, &tail);
		vm_map_entry_swap(&tail, &llist);
	} while (llist != header);
	root->left = tail;
	return (max_free);
}

/*
 * When llist is known to be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = root->start - llist->end;
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root, max_free, llist);
	} else {
		root->left = header;
		header->right = root;
	}
	return (max_free);
}

/*
 * When llist may or may not be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_left(root, llist);
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root->left == llist ? root : root->left,
		    max_free, llist);
	}
	return (max_free);
}

static vm_size_t
vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
{
	do {
		/*
		 * The max_free values of the children of rlist are in
		 * rlist->max_free and max_free.  Update with the
		 * max value.
		 */
		rlist->max_free = max_free =
		    vm_size_max(rlist->max_free, max_free);
		vm_map_entry_swap(&rlist->left, &tail);
		vm_map_entry_swap(&tail, &rlist);
	} while (rlist != header);
	root->right = tail;
	return (max_free);
}

/*
 * When rlist is known to be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = rlist->start - root->end;
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root, max_free, rlist);
	} else {
		root->right = header;
		header->left = root;
	}
	return (max_free);
}

/*
 * When rlist may or may not be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_right(root, rlist);
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root->right == rlist ? root : root->right,
		    max_free, rlist);
	}
	return (max_free);
}

/*
 *	vm_map_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The tree is threaded, which means that there are no null pointers.
 *	When a node has no left child, its left pointer points to its
 *	predecessor, which is the last ancestor on the search path from the
 *	root where the search branched right.  Likewise, when a node has no
 *	right child, its right pointer points to its successor.  The map
 *	header node is the predecessor of the first map entry, and the
 *	successor of the last.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower if possible) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	header = &map->header;
	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
	if (root != NULL) {
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else if (llist != header) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
	} else if (rlist != header) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		max_free_left = vm_map_splay_merge_pred(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else {
		/* There is no root. */
		return (NULL);
	}
	root->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.  On linking, if new entry clips
 *	existing entry, trim existing entry to avoid overlap, and manage
 *	offsets.  On unlinking, merge disappearing entry with neighbor, if
 *	called for, and manage offsets.  Callers should not modify fields in
 *	entries already mapped.
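 *
 *	For example, linking a clone that covers only the tail [B, C) of an
 *	existing entry's range [A, C) trims the existing entry to [A, B) and
 *	advances the clone's offset by B - A, so both entries continue to
 *	map the same pages of the backing object.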
1382 */ 1383 static void 1384 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) 1385 { 1386 vm_map_entry_t header, llist, rlist, root; 1387 vm_size_t max_free_left, max_free_right; 1388 1389 CTR3(KTR_VM, 1390 "vm_map_entry_link: map %p, nentries %d, entry %p", map, 1391 map->nentries, entry); 1392 VM_MAP_ASSERT_LOCKED(map); 1393 map->nentries++; 1394 header = &map->header; 1395 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1396 if (root == NULL) { 1397 /* 1398 * The new entry does not overlap any existing entry in the 1399 * map, so it becomes the new root of the map tree. 1400 */ 1401 max_free_left = vm_map_splay_merge_pred(header, entry, llist); 1402 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); 1403 } else if (entry->start == root->start) { 1404 /* 1405 * The new entry is a clone of root, with only the end field 1406 * changed. The root entry will be shrunk to abut the new 1407 * entry, and will be the right child of the new root entry in 1408 * the modified map. 1409 */ 1410 KASSERT(entry->end < root->end, 1411 ("%s: clip_start not within entry", __func__)); 1412 vm_map_splay_findprev(root, &llist); 1413 if ((root->eflags & (MAP_ENTRY_STACK_GAP_DN | 1414 MAP_ENTRY_STACK_GAP_UP)) == 0) 1415 root->offset += entry->end - root->start; 1416 root->start = entry->end; 1417 max_free_left = vm_map_splay_merge_pred(header, entry, llist); 1418 max_free_right = root->max_free = vm_size_max( 1419 vm_map_splay_merge_pred(entry, root, entry), 1420 vm_map_splay_merge_right(header, root, rlist)); 1421 } else { 1422 /* 1423 * The new entry is a clone of root, with only the start field 1424 * changed. The root entry will be shrunk to abut the new 1425 * entry, and will be the left child of the new root entry in 1426 * the modified map. 
1427 */ 1428 KASSERT(entry->end == root->end, 1429 ("%s: clip_start not within entry", __func__)); 1430 vm_map_splay_findnext(root, &rlist); 1431 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 1432 MAP_ENTRY_STACK_GAP_UP)) == 0) 1433 entry->offset += entry->start - root->start; 1434 root->end = entry->start; 1435 max_free_left = root->max_free = vm_size_max( 1436 vm_map_splay_merge_left(header, root, llist), 1437 vm_map_splay_merge_succ(entry, root, entry)); 1438 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); 1439 } 1440 entry->max_free = vm_size_max(max_free_left, max_free_right); 1441 map->root = entry; 1442 VM_MAP_ASSERT_CONSISTENT(map); 1443 } 1444 1445 enum unlink_merge_type { 1446 UNLINK_MERGE_NONE, 1447 UNLINK_MERGE_NEXT 1448 }; 1449 1450 static void 1451 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, 1452 enum unlink_merge_type op) 1453 { 1454 vm_map_entry_t header, llist, rlist, root; 1455 vm_size_t max_free_left, max_free_right; 1456 1457 VM_MAP_ASSERT_LOCKED(map); 1458 header = &map->header; 1459 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1460 KASSERT(root != NULL, 1461 ("vm_map_entry_unlink: unlink object not mapped")); 1462 1463 vm_map_splay_findprev(root, &llist); 1464 vm_map_splay_findnext(root, &rlist); 1465 if (op == UNLINK_MERGE_NEXT) { 1466 rlist->start = root->start; 1467 MPASS((rlist->eflags & (MAP_ENTRY_STACK_GAP_DN | 1468 MAP_ENTRY_STACK_GAP_UP)) == 0); 1469 rlist->offset = root->offset; 1470 } 1471 if (llist != header) { 1472 root = llist; 1473 llist = root->right; 1474 max_free_left = vm_map_splay_merge_left(header, root, llist); 1475 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1476 } else if (rlist != header) { 1477 root = rlist; 1478 rlist = root->left; 1479 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1480 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1481 } else { 1482 header->left = header->right = header; 1483 root = NULL; 1484 } 1485 if (root != NULL) 1486 root->max_free = vm_size_max(max_free_left, max_free_right); 1487 map->root = root; 1488 VM_MAP_ASSERT_CONSISTENT(map); 1489 map->nentries--; 1490 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 1491 map->nentries, entry); 1492 } 1493 1494 /* 1495 * vm_map_entry_resize: 1496 * 1497 * Resize a vm_map_entry, recompute the amount of free space that 1498 * follows it and propagate that value up the tree. 1499 * 1500 * The map must be locked, and leaves it so. 1501 */ 1502 static void 1503 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount) 1504 { 1505 vm_map_entry_t header, llist, rlist, root; 1506 1507 VM_MAP_ASSERT_LOCKED(map); 1508 header = &map->header; 1509 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1510 KASSERT(root != NULL, ("%s: resize object not mapped", __func__)); 1511 vm_map_splay_findnext(root, &rlist); 1512 entry->end += grow_amount; 1513 root->max_free = vm_size_max( 1514 vm_map_splay_merge_left(header, root, llist), 1515 vm_map_splay_merge_succ(header, root, rlist)); 1516 map->root = root; 1517 VM_MAP_ASSERT_CONSISTENT(map); 1518 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", 1519 __func__, map, map->nentries, entry); 1520 } 1521 1522 /* 1523 * vm_map_lookup_entry: [ internal use only ] 1524 * 1525 * Finds the map entry containing (or 1526 * immediately preceding) the specified address 1527 * in the given map; the entry is returned 1528 * in the "entry" parameter. 
The boolean 1529 * result indicates whether the address is 1530 * actually contained in the map. 1531 */ 1532 boolean_t 1533 vm_map_lookup_entry( 1534 vm_map_t map, 1535 vm_offset_t address, 1536 vm_map_entry_t *entry) /* OUT */ 1537 { 1538 vm_map_entry_t cur, header, lbound, ubound; 1539 boolean_t locked; 1540 1541 /* 1542 * If the map is empty, then the map entry immediately preceding 1543 * "address" is the map's header. 1544 */ 1545 header = &map->header; 1546 cur = map->root; 1547 if (cur == NULL) { 1548 *entry = header; 1549 return (FALSE); 1550 } 1551 if (address >= cur->start && cur->end > address) { 1552 *entry = cur; 1553 return (TRUE); 1554 } 1555 if ((locked = vm_map_locked(map)) || 1556 sx_try_upgrade(&map->lock)) { 1557 /* 1558 * Splay requires a write lock on the map. However, it only 1559 * restructures the binary search tree; it does not otherwise 1560 * change the map. Thus, the map's timestamp need not change 1561 * on a temporary upgrade. 1562 */ 1563 cur = vm_map_splay(map, address); 1564 if (!locked) { 1565 VM_MAP_UNLOCK_CONSISTENT(map); 1566 sx_downgrade(&map->lock); 1567 } 1568 1569 /* 1570 * If "address" is contained within a map entry, the new root 1571 * is that map entry. Otherwise, the new root is a map entry 1572 * immediately before or after "address". 1573 */ 1574 if (address < cur->start) { 1575 *entry = header; 1576 return (FALSE); 1577 } 1578 *entry = cur; 1579 return (address < cur->end); 1580 } 1581 /* 1582 * Since the map is only locked for read access, perform a 1583 * standard binary search tree lookup for "address". 1584 */ 1585 lbound = ubound = header; 1586 for (;;) { 1587 if (address < cur->start) { 1588 ubound = cur; 1589 cur = cur->left; 1590 if (cur == lbound) 1591 break; 1592 } else if (cur->end <= address) { 1593 lbound = cur; 1594 cur = cur->right; 1595 if (cur == ubound) 1596 break; 1597 } else { 1598 *entry = cur; 1599 return (TRUE); 1600 } 1601 } 1602 *entry = lbound; 1603 return (FALSE); 1604 } 1605 1606 /* 1607 * vm_map_insert1() is identical to vm_map_insert() except that it 1608 * returns the newly inserted map entry in '*res'. In case the new 1609 * entry is coalesced with a neighbor or an existing entry was 1610 * resized, that entry is returned. In any case, the returned entry 1611 * covers the specified address range. 1612 */ 1613 static int 1614 vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1615 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow, 1616 vm_map_entry_t *res) 1617 { 1618 vm_map_entry_t new_entry, next_entry, prev_entry; 1619 struct ucred *cred; 1620 vm_eflags_t protoeflags; 1621 vm_inherit_t inheritance; 1622 u_long bdry; 1623 u_int bidx; 1624 1625 VM_MAP_ASSERT_LOCKED(map); 1626 KASSERT(object != kernel_object || 1627 (cow & MAP_COPY_ON_WRITE) == 0, 1628 ("vm_map_insert: kernel object and COW")); 1629 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 || 1630 (cow & MAP_SPLIT_BOUNDARY_MASK) != 0, 1631 ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x", 1632 object, cow)); 1633 KASSERT((prot & ~max) == 0, 1634 ("prot %#x is not subset of max_prot %#x", prot, max)); 1635 1636 /* 1637 * Check that the start and end points are not bogus. 
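	 * A range is bogus if it is empty, wraps around, or does not lie
	 * entirely within [vm_map_min(map), vm_map_max(map)]; see
	 * vm_map_range_valid().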
1638 */ 1639 if (start == end || !vm_map_range_valid(map, start, end)) 1640 return (KERN_INVALID_ADDRESS); 1641 1642 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE | 1643 VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) 1644 return (KERN_PROTECTION_FAILURE); 1645 1646 /* 1647 * Find the entry prior to the proposed starting address; if it's part 1648 * of an existing entry, this range is bogus. 1649 */ 1650 if (vm_map_lookup_entry(map, start, &prev_entry)) 1651 return (KERN_NO_SPACE); 1652 1653 /* 1654 * Assert that the next entry doesn't overlap the end point. 1655 */ 1656 next_entry = vm_map_entry_succ(prev_entry); 1657 if (next_entry->start < end) 1658 return (KERN_NO_SPACE); 1659 1660 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || 1661 max != VM_PROT_NONE)) 1662 return (KERN_INVALID_ARGUMENT); 1663 1664 protoeflags = 0; 1665 if (cow & MAP_COPY_ON_WRITE) 1666 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 1667 if (cow & MAP_NOFAULT) 1668 protoeflags |= MAP_ENTRY_NOFAULT; 1669 if (cow & MAP_DISABLE_SYNCER) 1670 protoeflags |= MAP_ENTRY_NOSYNC; 1671 if (cow & MAP_DISABLE_COREDUMP) 1672 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1673 if (cow & MAP_STACK_GROWS_DOWN) 1674 protoeflags |= MAP_ENTRY_GROWS_DOWN; 1675 if (cow & MAP_STACK_GROWS_UP) 1676 protoeflags |= MAP_ENTRY_GROWS_UP; 1677 if (cow & MAP_WRITECOUNT) 1678 protoeflags |= MAP_ENTRY_WRITECNT; 1679 if (cow & MAP_VN_EXEC) 1680 protoeflags |= MAP_ENTRY_VN_EXEC; 1681 if ((cow & MAP_CREATE_GUARD) != 0) 1682 protoeflags |= MAP_ENTRY_GUARD; 1683 if ((cow & MAP_CREATE_STACK_GAP_DN) != 0) 1684 protoeflags |= MAP_ENTRY_STACK_GAP_DN; 1685 if ((cow & MAP_CREATE_STACK_GAP_UP) != 0) 1686 protoeflags |= MAP_ENTRY_STACK_GAP_UP; 1687 if (cow & MAP_INHERIT_SHARE) 1688 inheritance = VM_INHERIT_SHARE; 1689 else 1690 inheritance = VM_INHERIT_DEFAULT; 1691 if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) { 1692 /* This magically ignores index 0, for usual page size. */ 1693 bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >> 1694 MAP_SPLIT_BOUNDARY_SHIFT; 1695 if (bidx >= MAXPAGESIZES) 1696 return (KERN_INVALID_ARGUMENT); 1697 bdry = pagesizes[bidx] - 1; 1698 if ((start & bdry) != 0 || (end & bdry) != 0) 1699 return (KERN_INVALID_ARGUMENT); 1700 protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 1701 } 1702 1703 cred = NULL; 1704 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0) 1705 goto charged; 1706 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 1707 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 1708 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 1709 return (KERN_RESOURCE_SHORTAGE); 1710 KASSERT(object == NULL || 1711 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 || 1712 object->cred == NULL, 1713 ("overcommit: vm_map_insert o %p", object)); 1714 cred = curthread->td_ucred; 1715 } 1716 1717 charged: 1718 /* Expand the kernel pmap, if necessary. */ 1719 if (map == kernel_map && end > kernel_vm_end) 1720 pmap_growkernel(end); 1721 if (object != NULL) { 1722 /* 1723 * OBJ_ONEMAPPING must be cleared unless this mapping 1724 * is trivially proven to be the only mapping for any 1725 * of the object's pages. (Object granularity 1726 * reference counting is insufficient to recognize 1727 * aliases with precision.) 
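	 * For instance, two map entries may reference disjoint ranges of
	 * the same anonymous object; the object then carries two
	 * references although no page is mapped twice, so a reference
	 * count above one cannot by itself prove aliasing and the flag is
	 * cleared conservatively.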
1728 */ 1729 if ((object->flags & OBJ_ANON) != 0) { 1730 VM_OBJECT_WLOCK(object); 1731 if (object->ref_count > 1 || object->shadow_count != 0) 1732 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1733 VM_OBJECT_WUNLOCK(object); 1734 } 1735 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == 1736 protoeflags && 1737 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP | 1738 MAP_VN_EXEC)) == 0 && 1739 prev_entry->end == start && (prev_entry->cred == cred || 1740 (prev_entry->object.vm_object != NULL && 1741 prev_entry->object.vm_object->cred == cred)) && 1742 vm_object_coalesce(prev_entry->object.vm_object, 1743 prev_entry->offset, 1744 (vm_size_t)(prev_entry->end - prev_entry->start), 1745 (vm_size_t)(end - prev_entry->end), cred != NULL && 1746 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 1747 /* 1748 * We were able to extend the object. Determine if we 1749 * can extend the previous map entry to include the 1750 * new range as well. 1751 */ 1752 if (prev_entry->inheritance == inheritance && 1753 prev_entry->protection == prot && 1754 prev_entry->max_protection == max && 1755 prev_entry->wired_count == 0) { 1756 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == 1757 0, ("prev_entry %p has incoherent wiring", 1758 prev_entry)); 1759 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) 1760 map->size += end - prev_entry->end; 1761 vm_map_entry_resize(map, prev_entry, 1762 end - prev_entry->end); 1763 *res = vm_map_try_merge_entries(map, prev_entry, 1764 next_entry); 1765 return (KERN_SUCCESS); 1766 } 1767 1768 /* 1769 * If we can extend the object but cannot extend the 1770 * map entry, we have to create a new map entry. We 1771 * must bump the ref count on the extended object to 1772 * account for it. object may be NULL. 1773 */ 1774 object = prev_entry->object.vm_object; 1775 offset = prev_entry->offset + 1776 (prev_entry->end - prev_entry->start); 1777 vm_object_reference(object); 1778 if (cred != NULL && object != NULL && object->cred != NULL && 1779 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 1780 /* Object already accounts for this uid. */ 1781 cred = NULL; 1782 } 1783 } 1784 if (cred != NULL) 1785 crhold(cred); 1786 1787 /* 1788 * Create a new entry 1789 */ 1790 new_entry = vm_map_entry_create(map); 1791 new_entry->start = start; 1792 new_entry->end = end; 1793 new_entry->cred = NULL; 1794 1795 new_entry->eflags = protoeflags; 1796 new_entry->object.vm_object = object; 1797 new_entry->offset = offset; 1798 1799 new_entry->inheritance = inheritance; 1800 new_entry->protection = prot; 1801 new_entry->max_protection = max; 1802 new_entry->wired_count = 0; 1803 new_entry->wiring_thread = NULL; 1804 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 1805 new_entry->next_read = start; 1806 1807 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 1808 ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); 1809 new_entry->cred = cred; 1810 1811 /* 1812 * Insert the new entry into the list 1813 */ 1814 vm_map_entry_link(map, new_entry); 1815 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) 1816 map->size += new_entry->end - new_entry->start; 1817 1818 /* 1819 * Try to coalesce the new entry with both the previous and next 1820 * entries in the list. Previously, we only attempted to coalesce 1821 * with the previous entry when object is NULL. Here, we handle the 1822 * other cases, which are less common. 
1823 */ 1824 vm_map_try_merge_entries(map, prev_entry, new_entry); 1825 *res = vm_map_try_merge_entries(map, new_entry, next_entry); 1826 1827 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1828 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1829 end - start, cow & MAP_PREFAULT_PARTIAL); 1830 } 1831 1832 return (KERN_SUCCESS); 1833 } 1834 1835 /* 1836 * vm_map_insert: 1837 * 1838 * Inserts the given VM object into the target map at the 1839 * specified address range. 1840 * 1841 * Requires that the map be locked, and leaves it so. 1842 * 1843 * If object is non-NULL, ref count must be bumped by caller 1844 * prior to making call to account for the new entry. 1845 */ 1846 int 1847 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1848 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1849 { 1850 vm_map_entry_t res; 1851 1852 return (vm_map_insert1(map, object, offset, start, end, prot, max, 1853 cow, &res)); 1854 } 1855 1856 /* 1857 * vm_map_findspace: 1858 * 1859 * Find the first fit (lowest VM address) for "length" free bytes 1860 * beginning at address >= start in the given map. 1861 * 1862 * In a vm_map_entry, "max_free" is the maximum amount of 1863 * contiguous free space between an entry in its subtree and a 1864 * neighbor of that entry. This allows finding a free region in 1865 * one path down the tree, so O(log n) amortized with splay 1866 * trees. 1867 * 1868 * Requires that the map be locked, and leaves it so. 1869 * 1870 * Returns: starting address if sufficient space, 1871 * vm_map_max(map)-length+1 if insufficient space. 1872 */ 1873 vm_offset_t 1874 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length) 1875 { 1876 vm_map_entry_t header, llist, rlist, root, y; 1877 vm_size_t left_length, max_free_left, max_free_right; 1878 vm_offset_t gap_end; 1879 1880 VM_MAP_ASSERT_LOCKED(map); 1881 1882 /* 1883 * Request must fit within min/max VM address and must avoid 1884 * address wrap. 1885 */ 1886 start = MAX(start, vm_map_min(map)); 1887 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) 1888 return (vm_map_max(map) - length + 1); 1889 1890 /* Empty tree means wide open address space. */ 1891 if (map->root == NULL) 1892 return (start); 1893 1894 /* 1895 * After splay_split, if start is within an entry, push it to the start 1896 * of the following gap. If rlist is at the end of the gap containing 1897 * start, save the end of that gap in gap_end to see if the gap is big 1898 * enough; otherwise set gap_end to start to skip gap-checking and move 1899 * directly to a search of the right subtree.
1900 */ 1901 header = &map->header; 1902 root = vm_map_splay_split(map, start, length, &llist, &rlist); 1903 gap_end = rlist->start; 1904 if (root != NULL) { 1905 start = root->end; 1906 if (root->right != rlist) 1907 gap_end = start; 1908 max_free_left = vm_map_splay_merge_left(header, root, llist); 1909 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1910 } else if (rlist != header) { 1911 root = rlist; 1912 rlist = root->left; 1913 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1914 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1915 } else { 1916 root = llist; 1917 llist = root->right; 1918 max_free_left = vm_map_splay_merge_left(header, root, llist); 1919 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1920 } 1921 root->max_free = vm_size_max(max_free_left, max_free_right); 1922 map->root = root; 1923 VM_MAP_ASSERT_CONSISTENT(map); 1924 if (length <= gap_end - start) 1925 return (start); 1926 1927 /* With max_free, can immediately tell if no solution. */ 1928 if (root->right == header || length > root->right->max_free) 1929 return (vm_map_max(map) - length + 1); 1930 1931 /* 1932 * Splay for the least large-enough gap in the right subtree. 1933 */ 1934 llist = rlist = header; 1935 for (left_length = 0;; 1936 left_length = vm_map_entry_max_free_left(root, llist)) { 1937 if (length <= left_length) 1938 SPLAY_LEFT_STEP(root, y, llist, rlist, 1939 length <= vm_map_entry_max_free_left(y, llist)); 1940 else 1941 SPLAY_RIGHT_STEP(root, y, llist, rlist, 1942 length > vm_map_entry_max_free_left(y, root)); 1943 if (root == NULL) 1944 break; 1945 } 1946 root = llist; 1947 llist = root->right; 1948 max_free_left = vm_map_splay_merge_left(header, root, llist); 1949 if (rlist == header) { 1950 root->max_free = vm_size_max(max_free_left, 1951 vm_map_splay_merge_succ(header, root, rlist)); 1952 } else { 1953 y = rlist; 1954 rlist = y->left; 1955 y->max_free = vm_size_max( 1956 vm_map_splay_merge_pred(root, y, root), 1957 vm_map_splay_merge_right(header, y, rlist)); 1958 root->max_free = vm_size_max(max_free_left, y->max_free); 1959 } 1960 map->root = root; 1961 VM_MAP_ASSERT_CONSISTENT(map); 1962 return (root->end); 1963 } 1964 1965 int 1966 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1967 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1968 vm_prot_t max, int cow) 1969 { 1970 vm_offset_t end; 1971 int result; 1972 1973 end = start + length; 1974 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1975 object == NULL, 1976 ("vm_map_fixed: non-NULL backing object for stack")); 1977 vm_map_lock(map); 1978 VM_MAP_RANGE_CHECK(map, start, end); 1979 if ((cow & MAP_CHECK_EXCL) == 0) { 1980 result = vm_map_delete(map, start, end); 1981 if (result != KERN_SUCCESS) 1982 goto out; 1983 } 1984 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1985 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1986 prot, max, cow); 1987 } else { 1988 result = vm_map_insert(map, object, offset, start, end, 1989 prot, max, cow); 1990 } 1991 out: 1992 vm_map_unlock(map); 1993 return (result); 1994 } 1995 1996 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10}; 1997 static const int aslr_pages_rnd_32[2] = {0x100, 0x4}; 1998 1999 static int cluster_anon = 1; 2000 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW, 2001 &cluster_anon, 0, 2002 "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always"); 2003 2004 static bool 2005 clustering_anon_allowed(vm_offset_t addr, int cow) 2006 { 
2007 2008 switch (cluster_anon) { 2009 case 0: 2010 return (false); 2011 case 1: 2012 return (addr == 0 || (cow & MAP_NO_HINT) != 0); 2013 case 2: 2014 default: 2015 return (true); 2016 } 2017 } 2018 2019 static long aslr_restarts; 2020 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD, 2021 &aslr_restarts, 0, 2022 "Number of aslr failures"); 2023 2024 /* 2025 * Searches for the specified amount of free space in the given map with the 2026 * specified alignment. Performs an address-ordered, first-fit search from 2027 * the given address "*addr", with an optional upper bound "max_addr". If the 2028 * parameter "alignment" is zero, then the alignment is computed from the 2029 * given (object, offset) pair so as to enable the greatest possible use of 2030 * superpage mappings. Returns KERN_SUCCESS and the address of the free space 2031 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE. 2032 * 2033 * The map must be locked. Initially, there must be at least "length" bytes 2034 * of free space at the given address. 2035 */ 2036 static int 2037 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2038 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr, 2039 vm_offset_t alignment) 2040 { 2041 vm_offset_t aligned_addr, free_addr; 2042 2043 VM_MAP_ASSERT_LOCKED(map); 2044 free_addr = *addr; 2045 KASSERT(free_addr == vm_map_findspace(map, free_addr, length), 2046 ("caller failed to provide space %#jx at address %p", 2047 (uintmax_t)length, (void *)free_addr)); 2048 for (;;) { 2049 /* 2050 * At the start of every iteration, the free space at address 2051 * "*addr" is at least "length" bytes. 2052 */ 2053 if (alignment == 0) 2054 pmap_align_superpage(object, offset, addr, length); 2055 else 2056 *addr = roundup2(*addr, alignment); 2057 aligned_addr = *addr; 2058 if (aligned_addr == free_addr) { 2059 /* 2060 * Alignment did not change "*addr", so "*addr" must 2061 * still provide sufficient free space. 2062 */ 2063 return (KERN_SUCCESS); 2064 } 2065 2066 /* 2067 * Test for address wrap on "*addr". A wrapped "*addr" could 2068 * be a valid address, in which case vm_map_findspace() cannot 2069 * be relied upon to fail. 2070 */ 2071 if (aligned_addr < free_addr) 2072 return (KERN_NO_SPACE); 2073 *addr = vm_map_findspace(map, aligned_addr, length); 2074 if (*addr + length > vm_map_max(map) || 2075 (max_addr != 0 && *addr + length > max_addr)) 2076 return (KERN_NO_SPACE); 2077 free_addr = *addr; 2078 if (free_addr == aligned_addr) { 2079 /* 2080 * If a successful call to vm_map_findspace() did not 2081 * change "*addr", then "*addr" must still be aligned 2082 * and provide sufficient free space. 2083 */ 2084 return (KERN_SUCCESS); 2085 } 2086 } 2087 } 2088 2089 int 2090 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length, 2091 vm_offset_t max_addr, vm_offset_t alignment) 2092 { 2093 /* XXXKIB ASLR eh ? */ 2094 *addr = vm_map_findspace(map, *addr, length); 2095 if (*addr + length > vm_map_max(map) || 2096 (max_addr != 0 && *addr + length > max_addr)) 2097 return (KERN_NO_SPACE); 2098 return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr, 2099 alignment)); 2100 } 2101 2102 /* 2103 * vm_map_find finds an unallocated region in the target address 2104 * map with the given length. The search is defined to be 2105 * first-fit from the specified address; the region found is 2106 * returned in the same parameter. 2107 * 2108 * If object is non-NULL, ref count must be bumped by caller 2109 * prior to making call to account for the new entry. 
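 *
 *	A minimal, hypothetical usage sketch (not part of this file), built
 *	only from the signature and flag names that appear below; a real
 *	caller would choose prot/max/cow values to suit its needs:
 *
 *		vm_offset_t addr = 0;
 *		int rv;
 *
 *		rv = vm_map_find(map, NULL, 0, &addr, PAGE_SIZE, 0,
 *		    VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE, 0);
 *		if (rv != KERN_SUCCESS)
 *			return (rv);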
2110 */ 2111 int 2112 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2113 vm_offset_t *addr, /* IN/OUT */ 2114 vm_size_t length, vm_offset_t max_addr, int find_space, 2115 vm_prot_t prot, vm_prot_t max, int cow) 2116 { 2117 vm_offset_t alignment, curr_min_addr, min_addr; 2118 int gap, pidx, rv, try; 2119 bool cluster, en_aslr, update_anon; 2120 2121 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 2122 object == NULL, 2123 ("vm_map_find: non-NULL backing object for stack")); 2124 MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE && 2125 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)); 2126 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 2127 (object->flags & OBJ_COLORED) == 0)) 2128 find_space = VMFS_ANY_SPACE; 2129 if (find_space >> 8 != 0) { 2130 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 2131 alignment = (vm_offset_t)1 << (find_space >> 8); 2132 } else 2133 alignment = 0; 2134 en_aslr = (map->flags & MAP_ASLR) != 0; 2135 update_anon = cluster = clustering_anon_allowed(*addr, cow) && 2136 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && 2137 find_space != VMFS_NO_SPACE && object == NULL && 2138 (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP | 2139 MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE; 2140 curr_min_addr = min_addr = *addr; 2141 if (en_aslr && min_addr == 0 && !cluster && 2142 find_space != VMFS_NO_SPACE && 2143 (map->flags & MAP_ASLR_IGNSTART) != 0) 2144 curr_min_addr = min_addr = vm_map_min(map); 2145 try = 0; 2146 vm_map_lock(map); 2147 if (cluster) { 2148 curr_min_addr = map->anon_loc; 2149 if (curr_min_addr == 0) 2150 cluster = false; 2151 } 2152 if (find_space != VMFS_NO_SPACE) { 2153 KASSERT(find_space == VMFS_ANY_SPACE || 2154 find_space == VMFS_OPTIMAL_SPACE || 2155 find_space == VMFS_SUPER_SPACE || 2156 alignment != 0, ("unexpected VMFS flag")); 2157 again: 2158 /* 2159 * When creating an anonymous mapping, try clustering 2160 * with an existing anonymous mapping first. 2161 * 2162 * We make up to two attempts to find address space 2163 * for a given find_space value. The first attempt may 2164 * apply randomization or may cluster with an existing 2165 * anonymous mapping. If this first attempt fails, 2166 * perform a first-fit search of the available address 2167 * space. 2168 * 2169 * If all tries failed, and find_space is 2170 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE. 2171 * Again enable clustering and randomization. 2172 */ 2173 try++; 2174 MPASS(try <= 2); 2175 2176 if (try == 2) { 2177 /* 2178 * Second try: we failed either to find a 2179 * suitable region for randomizing the 2180 * allocation, or to cluster with an existing 2181 * mapping. Retry with free run. 2182 */ 2183 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? 2184 vm_map_min(map) : min_addr; 2185 atomic_add_long(&aslr_restarts, 1); 2186 } 2187 2188 if (try == 1 && en_aslr && !cluster) { 2189 /* 2190 * Find space for allocation, including 2191 * gap needed for later randomization. 2192 */ 2193 pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 && 2194 (find_space == VMFS_SUPER_SPACE || find_space == 2195 VMFS_OPTIMAL_SPACE) ? 1 : 0; 2196 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR && 2197 (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ? 
2198 aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx]; 2199 *addr = vm_map_findspace(map, curr_min_addr, 2200 length + gap * pagesizes[pidx]); 2201 if (*addr + length + gap * pagesizes[pidx] > 2202 vm_map_max(map)) 2203 goto again; 2204 /* And randomize the start address. */ 2205 *addr += (arc4random() % gap) * pagesizes[pidx]; 2206 if (max_addr != 0 && *addr + length > max_addr) 2207 goto again; 2208 } else { 2209 *addr = vm_map_findspace(map, curr_min_addr, length); 2210 if (*addr + length > vm_map_max(map) || 2211 (max_addr != 0 && *addr + length > max_addr)) { 2212 if (cluster) { 2213 cluster = false; 2214 MPASS(try == 1); 2215 goto again; 2216 } 2217 rv = KERN_NO_SPACE; 2218 goto done; 2219 } 2220 } 2221 2222 if (find_space != VMFS_ANY_SPACE && 2223 (rv = vm_map_alignspace(map, object, offset, addr, length, 2224 max_addr, alignment)) != KERN_SUCCESS) { 2225 if (find_space == VMFS_OPTIMAL_SPACE) { 2226 find_space = VMFS_ANY_SPACE; 2227 curr_min_addr = min_addr; 2228 cluster = update_anon; 2229 try = 0; 2230 goto again; 2231 } 2232 goto done; 2233 } 2234 } else if ((cow & MAP_REMAP) != 0) { 2235 if (!vm_map_range_valid(map, *addr, *addr + length)) { 2236 rv = KERN_INVALID_ADDRESS; 2237 goto done; 2238 } 2239 rv = vm_map_delete(map, *addr, *addr + length); 2240 if (rv != KERN_SUCCESS) 2241 goto done; 2242 } 2243 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 2244 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 2245 max, cow); 2246 } else { 2247 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 2248 prot, max, cow); 2249 } 2250 2251 /* 2252 * Update the starting address for clustered anonymous memory mappings 2253 * if a starting address was not previously defined or an ASLR restart 2254 * placed an anonymous memory mapping at a lower address. 2255 */ 2256 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || 2257 *addr < map->anon_loc)) 2258 map->anon_loc = *addr; 2259 done: 2260 vm_map_unlock(map); 2261 return (rv); 2262 } 2263 2264 /* 2265 * vm_map_find_min() is a variant of vm_map_find() that takes an 2266 * additional parameter ("default_addr") and treats the given address 2267 * ("*addr") differently. Specifically, it treats "*addr" as a hint 2268 * and not as the minimum address where the mapping is created. 2269 * 2270 * This function works in two phases. First, it tries to 2271 * allocate above the hint. If that fails and the hint is 2272 * greater than "default_addr", it performs a second pass, replacing 2273 * the hint with "default_addr" as the minimum address for the 2274 * allocation. 2275 */ 2276 int 2277 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2278 vm_offset_t *addr, vm_size_t length, vm_offset_t default_addr, 2279 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 2280 int cow) 2281 { 2282 vm_offset_t hint; 2283 int rv; 2284 2285 hint = *addr; 2286 if (hint == 0) { 2287 cow |= MAP_NO_HINT; 2288 *addr = hint = default_addr; 2289 } 2290 for (;;) { 2291 rv = vm_map_find(map, object, offset, addr, length, max_addr, 2292 find_space, prot, max, cow); 2293 if (rv == KERN_SUCCESS || default_addr >= hint) 2294 return (rv); 2295 *addr = hint = default_addr; 2296 } 2297 } 2298 2299 /* 2300 * A map entry with any of the following flags set must not be merged with 2301 * another entry. 
2302 */ 2303 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \ 2304 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \ 2305 MAP_ENTRY_STACK_GAP_UP | MAP_ENTRY_STACK_GAP_DN) 2306 2307 static bool 2308 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 2309 { 2310 2311 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 2312 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 2313 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 2314 prev, entry)); 2315 return (prev->end == entry->start && 2316 prev->object.vm_object == entry->object.vm_object && 2317 (prev->object.vm_object == NULL || 2318 prev->offset + (prev->end - prev->start) == entry->offset) && 2319 prev->eflags == entry->eflags && 2320 prev->protection == entry->protection && 2321 prev->max_protection == entry->max_protection && 2322 prev->inheritance == entry->inheritance && 2323 prev->wired_count == entry->wired_count && 2324 prev->cred == entry->cred); 2325 } 2326 2327 static void 2328 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 2329 { 2330 2331 /* 2332 * If the backing object is a vnode object, vm_object_deallocate() 2333 * calls vrele(). However, vrele() does not lock the vnode because 2334 * the vnode has additional references. Thus, the map lock can be 2335 * kept without causing a lock-order reversal with the vnode lock. 2336 * 2337 * Since we count the number of virtual page mappings in 2338 * object->un_pager.vnp.writemappings, the writemappings value 2339 * should not be adjusted when the entry is disposed of. 2340 */ 2341 if (entry->object.vm_object != NULL) 2342 vm_object_deallocate(entry->object.vm_object); 2343 if (entry->cred != NULL) 2344 crfree(entry->cred); 2345 vm_map_entry_dispose(map, entry); 2346 } 2347 2348 /* 2349 * vm_map_try_merge_entries: 2350 * 2351 * Compare two map entries that represent consecutive ranges. If 2352 * the entries can be merged, expand the range of the second to 2353 * cover the range of the first and delete the first. Then return 2354 * the map entry that includes the first range. 2355 * 2356 * The map must be locked. 2357 */ 2358 vm_map_entry_t 2359 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry, 2360 vm_map_entry_t entry) 2361 { 2362 2363 VM_MAP_ASSERT_LOCKED(map); 2364 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && 2365 vm_map_mergeable_neighbors(prev_entry, entry)) { 2366 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT); 2367 vm_map_merged_neighbor_dispose(map, prev_entry); 2368 return (entry); 2369 } 2370 return (prev_entry); 2371 } 2372 2373 /* 2374 * vm_map_entry_back: 2375 * 2376 * Allocate an object to back a map entry. 2377 */ 2378 static inline void 2379 vm_map_entry_back(vm_map_entry_t entry) 2380 { 2381 vm_object_t object; 2382 2383 KASSERT(entry->object.vm_object == NULL, 2384 ("map entry %p has backing object", entry)); 2385 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2386 ("map entry %p is a submap", entry)); 2387 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, 2388 entry->cred, entry->end - entry->start); 2389 entry->object.vm_object = object; 2390 entry->offset = 0; 2391 entry->cred = NULL; 2392 } 2393 2394 /* 2395 * vm_map_entry_charge_object 2396 * 2397 * If there is no object backing this entry, create one. Otherwise, if 2398 * the entry has cred, give it to the backing object. 
2399 */ 2400 static inline void 2401 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) 2402 { 2403 2404 VM_MAP_ASSERT_LOCKED(map); 2405 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2406 ("map entry %p is a submap", entry)); 2407 if (entry->object.vm_object == NULL && !map->system_map && 2408 (entry->eflags & MAP_ENTRY_GUARD) == 0) 2409 vm_map_entry_back(entry); 2410 else if (entry->object.vm_object != NULL && 2411 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2412 entry->cred != NULL) { 2413 VM_OBJECT_WLOCK(entry->object.vm_object); 2414 KASSERT(entry->object.vm_object->cred == NULL, 2415 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); 2416 entry->object.vm_object->cred = entry->cred; 2417 entry->object.vm_object->charge = entry->end - entry->start; 2418 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2419 entry->cred = NULL; 2420 } 2421 } 2422 2423 /* 2424 * vm_map_entry_clone 2425 * 2426 * Create a duplicate map entry for clipping. 2427 */ 2428 static vm_map_entry_t 2429 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry) 2430 { 2431 vm_map_entry_t new_entry; 2432 2433 VM_MAP_ASSERT_LOCKED(map); 2434 2435 /* 2436 * Create a backing object now, if none exists, so that more individual 2437 * objects won't be created after the map entry is split. 2438 */ 2439 vm_map_entry_charge_object(map, entry); 2440 2441 /* Clone the entry. */ 2442 new_entry = vm_map_entry_create(map); 2443 *new_entry = *entry; 2444 if (new_entry->cred != NULL) 2445 crhold(entry->cred); 2446 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2447 vm_object_reference(new_entry->object.vm_object); 2448 vm_map_entry_set_vnode_text(new_entry, true); 2449 /* 2450 * The object->un_pager.vnp.writemappings for the object of 2451 * MAP_ENTRY_WRITECNT type entry shall be kept as is here. The 2452 * virtual pages are re-distributed among the clipped entries, 2453 * so the sum is left the same. 2454 */ 2455 } 2456 return (new_entry); 2457 } 2458 2459 /* 2460 * vm_map_clip_start: [ internal use only ] 2461 * 2462 * Asserts that the given entry begins at or after 2463 * the specified address; if necessary, 2464 * it splits the entry into two. 2465 */ 2466 static int 2467 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr) 2468 { 2469 vm_map_entry_t new_entry; 2470 int bdry_idx; 2471 2472 if (!map->system_map) 2473 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2474 "%s: map %p entry %p start 0x%jx", __func__, map, entry, 2475 (uintmax_t)startaddr); 2476 2477 if (startaddr <= entry->start) 2478 return (KERN_SUCCESS); 2479 2480 VM_MAP_ASSERT_LOCKED(map); 2481 KASSERT(entry->end > startaddr && entry->start < startaddr, 2482 ("%s: invalid clip of entry %p", __func__, entry)); 2483 2484 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2485 if (bdry_idx != 0) { 2486 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) 2487 return (KERN_INVALID_ARGUMENT); 2488 } 2489 2490 new_entry = vm_map_entry_clone(map, entry); 2491 2492 /* 2493 * Split off the front portion. Insert the new entry BEFORE this one, 2494 * so that this entry has the specified starting address. 2495 */ 2496 new_entry->end = startaddr; 2497 vm_map_entry_link(map, new_entry); 2498 return (KERN_SUCCESS); 2499 } 2500 2501 /* 2502 * vm_map_lookup_clip_start: 2503 * 2504 * Find the entry at or just after 'start', and clip it if 'start' is in 2505 * the interior of the entry. Return entry after 'start', and in 2506 * prev_entry set the entry before 'start'. 
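 *
 *	For example (illustrative numbers only): given an existing entry
 *	covering [0x1000, 0x5000) and start == 0x3000, the entry is split at
 *	0x3000; *res_entry then refers to the [0x3000, 0x5000) piece and
 *	*prev_entry to the [0x1000, 0x3000) piece that precedes it.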
2507 */ 2508 static int 2509 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start, 2510 vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry) 2511 { 2512 vm_map_entry_t entry; 2513 int rv; 2514 2515 if (!map->system_map) 2516 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2517 "%s: map %p start 0x%jx prev %p", __func__, map, 2518 (uintmax_t)start, prev_entry); 2519 2520 if (vm_map_lookup_entry(map, start, prev_entry)) { 2521 entry = *prev_entry; 2522 rv = vm_map_clip_start(map, entry, start); 2523 if (rv != KERN_SUCCESS) 2524 return (rv); 2525 *prev_entry = vm_map_entry_pred(entry); 2526 } else 2527 entry = vm_map_entry_succ(*prev_entry); 2528 *res_entry = entry; 2529 return (KERN_SUCCESS); 2530 } 2531 2532 /* 2533 * vm_map_clip_end: [ internal use only ] 2534 * 2535 * Asserts that the given entry ends at or before 2536 * the specified address; if necessary, 2537 * it splits the entry into two. 2538 */ 2539 static int 2540 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr) 2541 { 2542 vm_map_entry_t new_entry; 2543 int bdry_idx; 2544 2545 if (!map->system_map) 2546 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2547 "%s: map %p entry %p end 0x%jx", __func__, map, entry, 2548 (uintmax_t)endaddr); 2549 2550 if (endaddr >= entry->end) 2551 return (KERN_SUCCESS); 2552 2553 VM_MAP_ASSERT_LOCKED(map); 2554 KASSERT(entry->start < endaddr && entry->end > endaddr, 2555 ("%s: invalid clip of entry %p", __func__, entry)); 2556 2557 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2558 if (bdry_idx != 0) { 2559 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) 2560 return (KERN_INVALID_ARGUMENT); 2561 } 2562 2563 new_entry = vm_map_entry_clone(map, entry); 2564 2565 /* 2566 * Split off the back portion. Insert the new entry AFTER this one, 2567 * so that this entry has the specified ending address. 2568 */ 2569 new_entry->start = endaddr; 2570 vm_map_entry_link(map, new_entry); 2571 2572 return (KERN_SUCCESS); 2573 } 2574 2575 /* 2576 * vm_map_submap: [ kernel use only ] 2577 * 2578 * Mark the given range as handled by a subordinate map. 2579 * 2580 * This range must have been created with vm_map_find, 2581 * and no other operations may have been performed on this 2582 * range prior to calling vm_map_submap. 2583 * 2584 * Only a limited number of operations can be performed 2585 * within this range after calling vm_map_submap: 2586 * vm_fault 2587 * [Don't try vm_map_copy!] 2588 * 2589 * To remove a submapping, one must first remove the 2590 * range from the superior map, and then destroy the 2591 * submap (if desired). [Better yet, don't try it.]
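 *
 *	A hypothetical sketch of the intended sequence (illustrative only),
 *	following the constraint above that the range comes straight from
 *	vm_map_find with no backing object:
 *
 *		rv = vm_map_find(map, NULL, 0, &start, size, 0,
 *		    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *		if (rv == KERN_SUCCESS)
 *			rv = vm_map_submap(map, start, start + size, submap);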
2592 */ 2593 int 2594 vm_map_submap( 2595 vm_map_t map, 2596 vm_offset_t start, 2597 vm_offset_t end, 2598 vm_map_t submap) 2599 { 2600 vm_map_entry_t entry; 2601 int result; 2602 2603 result = KERN_INVALID_ARGUMENT; 2604 2605 vm_map_lock(submap); 2606 submap->flags |= MAP_IS_SUB_MAP; 2607 vm_map_unlock(submap); 2608 2609 vm_map_lock(map); 2610 VM_MAP_RANGE_CHECK(map, start, end); 2611 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && 2612 (entry->eflags & MAP_ENTRY_COW) == 0 && 2613 entry->object.vm_object == NULL) { 2614 result = vm_map_clip_start(map, entry, start); 2615 if (result != KERN_SUCCESS) 2616 goto unlock; 2617 result = vm_map_clip_end(map, entry, end); 2618 if (result != KERN_SUCCESS) 2619 goto unlock; 2620 entry->object.sub_map = submap; 2621 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 2622 result = KERN_SUCCESS; 2623 } 2624 unlock: 2625 vm_map_unlock(map); 2626 2627 if (result != KERN_SUCCESS) { 2628 vm_map_lock(submap); 2629 submap->flags &= ~MAP_IS_SUB_MAP; 2630 vm_map_unlock(submap); 2631 } 2632 return (result); 2633 } 2634 2635 /* 2636 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 2637 */ 2638 #define MAX_INIT_PT 96 2639 2640 /* 2641 * vm_map_pmap_enter: 2642 * 2643 * Preload the specified map's pmap with mappings to the specified 2644 * object's memory-resident pages. No further physical pages are 2645 * allocated, and no further virtual pages are retrieved from secondary 2646 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 2647 * limited number of page mappings are created at the low-end of the 2648 * specified address range. (For this purpose, a superpage mapping 2649 * counts as one page mapping.) Otherwise, all resident pages within 2650 * the specified address range are mapped. 2651 */ 2652 static void 2653 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2654 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2655 { 2656 vm_offset_t start; 2657 vm_page_t p, p_start; 2658 vm_pindex_t mask, psize, threshold, tmpidx; 2659 2660 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2661 return; 2662 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2663 VM_OBJECT_WLOCK(object); 2664 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2665 pmap_object_init_pt(map->pmap, addr, object, pindex, 2666 size); 2667 VM_OBJECT_WUNLOCK(object); 2668 return; 2669 } 2670 VM_OBJECT_LOCK_DOWNGRADE(object); 2671 } else 2672 VM_OBJECT_RLOCK(object); 2673 2674 psize = atop(size); 2675 if (psize + pindex > object->size) { 2676 if (pindex >= object->size) { 2677 VM_OBJECT_RUNLOCK(object); 2678 return; 2679 } 2680 psize = object->size - pindex; 2681 } 2682 2683 start = 0; 2684 p_start = NULL; 2685 threshold = MAX_INIT_PT; 2686 2687 p = vm_page_find_least(object, pindex); 2688 /* 2689 * Assert: the variable p is either (1) the page with the 2690 * least pindex greater than or equal to the parameter pindex 2691 * or (2) NULL. 2692 */ 2693 for (; 2694 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2695 p = TAILQ_NEXT(p, listq)) { 2696 /* 2697 * Don't let a prefault triggered by madvise consume the last 2698 * truly free pages by allocating pv entries.
2699 */ 2700 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2701 vm_page_count_severe()) || 2702 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2703 tmpidx >= threshold)) { 2704 psize = tmpidx; 2705 break; 2706 } 2707 if (vm_page_all_valid(p)) { 2708 if (p_start == NULL) { 2709 start = addr + ptoa(tmpidx); 2710 p_start = p; 2711 } 2712 /* Jump ahead if a superpage mapping is possible. */ 2713 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2714 (pagesizes[p->psind] - 1)) == 0) { 2715 mask = atop(pagesizes[p->psind]) - 1; 2716 if (tmpidx + mask < psize && 2717 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2718 p += mask; 2719 threshold += mask; 2720 } 2721 } 2722 } else if (p_start != NULL) { 2723 pmap_enter_object(map->pmap, start, addr + 2724 ptoa(tmpidx), p_start, prot); 2725 p_start = NULL; 2726 } 2727 } 2728 if (p_start != NULL) 2729 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2730 p_start, prot); 2731 VM_OBJECT_RUNLOCK(object); 2732 } 2733 2734 static void 2735 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot, 2736 vm_prot_t new_maxprot, int flags) 2737 { 2738 vm_prot_t old_prot; 2739 2740 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); 2741 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP | 2742 MAP_ENTRY_STACK_GAP_DN)) == 0) 2743 return; 2744 2745 old_prot = PROT_EXTRACT(entry->offset); 2746 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2747 entry->offset = PROT_MAX(new_maxprot) | 2748 (new_maxprot & old_prot); 2749 } 2750 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) { 2751 entry->offset = new_prot | PROT_MAX( 2752 PROT_MAX_EXTRACT(entry->offset)); 2753 } 2754 } 2755 2756 /* 2757 * vm_map_protect: 2758 * 2759 * Sets the protection and/or the maximum protection of the 2760 * specified address region in the target map. 2761 */ 2762 int 2763 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2764 vm_prot_t new_prot, vm_prot_t new_maxprot, int flags) 2765 { 2766 vm_map_entry_t entry, first_entry, in_tran, prev_entry; 2767 vm_object_t obj; 2768 struct ucred *cred; 2769 vm_offset_t orig_start; 2770 vm_prot_t check_prot, max_prot, old_prot; 2771 int rv; 2772 2773 if (start == end) 2774 return (KERN_SUCCESS); 2775 2776 if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT | 2777 VM_MAP_PROTECT_SET_MAXPROT) && 2778 !CONTAINS_BITS(new_maxprot, new_prot)) 2779 return (KERN_OUT_OF_BOUNDS); 2780 2781 orig_start = start; 2782 again: 2783 in_tran = NULL; 2784 start = orig_start; 2785 vm_map_lock(map); 2786 2787 if ((map->flags & MAP_WXORX) != 0 && 2788 (flags & VM_MAP_PROTECT_SET_PROT) != 0 && 2789 CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) { 2790 vm_map_unlock(map); 2791 return (KERN_PROTECTION_FAILURE); 2792 } 2793 2794 /* 2795 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2796 * need to fault pages into the map and will drop the map lock while 2797 * doing so, and the VM object may end up in an inconsistent state if we 2798 * update the protection on the map entry in between faults. 2799 */ 2800 vm_map_wait_busy(map); 2801 2802 VM_MAP_RANGE_CHECK(map, start, end); 2803 2804 if (!vm_map_lookup_entry(map, start, &first_entry)) 2805 first_entry = vm_map_entry_succ(first_entry); 2806 2807 if ((flags & VM_MAP_PROTECT_GROWSDOWN) != 0 && 2808 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { 2809 /* 2810 * Handle Linux's PROT_GROWSDOWN flag. 2811 * It means that protection is applied down to the 2812 * whole stack, including the specified range of the 2813 * mapped region, and the grow down region (AKA 2814 * guard). 
2815 */ 2816 while (!CONTAINS_BITS(first_entry->eflags, 2817 MAP_ENTRY_GUARD | MAP_ENTRY_STACK_GAP_DN) && 2818 first_entry != vm_map_entry_first(map)) 2819 first_entry = vm_map_entry_pred(first_entry); 2820 start = first_entry->start; 2821 } 2822 2823 /* 2824 * Make a first pass to check for protection violations. 2825 */ 2826 check_prot = 0; 2827 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2828 check_prot |= new_prot; 2829 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) 2830 check_prot |= new_maxprot; 2831 for (entry = first_entry; entry->start < end; 2832 entry = vm_map_entry_succ(entry)) { 2833 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 2834 vm_map_unlock(map); 2835 return (KERN_INVALID_ARGUMENT); 2836 } 2837 if ((entry->eflags & (MAP_ENTRY_GUARD | 2838 MAP_ENTRY_STACK_GAP_DN | MAP_ENTRY_STACK_GAP_UP)) == 2839 MAP_ENTRY_GUARD) 2840 continue; 2841 max_prot = (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 2842 MAP_ENTRY_STACK_GAP_UP)) != 0 ? 2843 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; 2844 if (!CONTAINS_BITS(max_prot, check_prot)) { 2845 vm_map_unlock(map); 2846 return (KERN_PROTECTION_FAILURE); 2847 } 2848 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2849 in_tran = entry; 2850 } 2851 2852 /* 2853 * Postpone the operation until all in-transition map entries have 2854 * stabilized. An in-transition entry might already have its pages 2855 * wired and wired_count incremented, but not yet have its 2856 * MAP_ENTRY_USER_WIRED flag set. In which case, we would fail to call 2857 * vm_fault_copy_entry() in the final loop below. 2858 */ 2859 if (in_tran != NULL) { 2860 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2861 vm_map_unlock_and_wait(map, 0); 2862 goto again; 2863 } 2864 2865 /* 2866 * Before changing the protections, try to reserve swap space for any 2867 * private (i.e., copy-on-write) mappings that are transitioning from 2868 * read-only to read/write access. If a reservation fails, break out 2869 * of this loop early and let the next loop simplify the entries, since 2870 * some may now be mergeable. 2871 */ 2872 rv = vm_map_clip_start(map, first_entry, start); 2873 if (rv != KERN_SUCCESS) { 2874 vm_map_unlock(map); 2875 return (rv); 2876 } 2877 for (entry = first_entry; entry->start < end; 2878 entry = vm_map_entry_succ(entry)) { 2879 rv = vm_map_clip_end(map, entry, end); 2880 if (rv != KERN_SUCCESS) { 2881 vm_map_unlock(map); 2882 return (rv); 2883 } 2884 2885 if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 || 2886 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || 2887 ENTRY_CHARGED(entry) || 2888 (entry->eflags & MAP_ENTRY_GUARD) != 0) 2889 continue; 2890 2891 cred = curthread->td_ucred; 2892 obj = entry->object.vm_object; 2893 2894 if (obj == NULL || 2895 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { 2896 if (!swap_reserve(entry->end - entry->start)) { 2897 rv = KERN_RESOURCE_SHORTAGE; 2898 end = entry->end; 2899 break; 2900 } 2901 crhold(cred); 2902 entry->cred = cred; 2903 continue; 2904 } 2905 2906 VM_OBJECT_WLOCK(obj); 2907 if ((obj->flags & OBJ_SWAP) == 0) { 2908 VM_OBJECT_WUNLOCK(obj); 2909 continue; 2910 } 2911 2912 /* 2913 * Charge for the whole object allocation now, since 2914 * we cannot distinguish between non-charged and 2915 * charged clipped mapping of the same object later. 
2916 */ 2917 KASSERT(obj->charge == 0, 2918 ("vm_map_protect: object %p overcharged (entry %p)", 2919 obj, entry)); 2920 if (!swap_reserve(ptoa(obj->size))) { 2921 VM_OBJECT_WUNLOCK(obj); 2922 rv = KERN_RESOURCE_SHORTAGE; 2923 end = entry->end; 2924 break; 2925 } 2926 2927 crhold(cred); 2928 obj->cred = cred; 2929 obj->charge = ptoa(obj->size); 2930 VM_OBJECT_WUNLOCK(obj); 2931 } 2932 2933 /* 2934 * If enough swap space was available, go back and fix up protections. 2935 * Otherwise, just simplify entries, since some may have been modified. 2936 * [Note that clipping is not necessary the second time.] 2937 */ 2938 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry; 2939 entry->start < end; 2940 vm_map_try_merge_entries(map, prev_entry, entry), 2941 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 2942 if (rv != KERN_SUCCESS) 2943 continue; 2944 2945 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 2946 vm_map_protect_guard(entry, new_prot, new_maxprot, 2947 flags); 2948 continue; 2949 } 2950 2951 old_prot = entry->protection; 2952 2953 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2954 entry->max_protection = new_maxprot; 2955 entry->protection = new_maxprot & old_prot; 2956 } 2957 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2958 entry->protection = new_prot; 2959 2960 /* 2961 * For user wired map entries, the normal lazy evaluation of 2962 * write access upgrades through soft page faults is 2963 * undesirable. Instead, immediately copy any pages that are 2964 * copy-on-write and enable write access in the physical map. 2965 */ 2966 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2967 (entry->protection & VM_PROT_WRITE) != 0 && 2968 (old_prot & VM_PROT_WRITE) == 0) 2969 vm_fault_copy_entry(map, map, entry, entry, NULL); 2970 2971 /* 2972 * When restricting access, update the physical map. Worry 2973 * about copy-on-write here. 2974 */ 2975 if ((old_prot & ~entry->protection) != 0) { 2976 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2977 VM_PROT_ALL) 2978 pmap_protect(map->pmap, entry->start, 2979 entry->end, 2980 entry->protection & MASK(entry)); 2981 #undef MASK 2982 } 2983 } 2984 vm_map_try_merge_entries(map, prev_entry, entry); 2985 vm_map_unlock(map); 2986 return (rv); 2987 } 2988 2989 /* 2990 * vm_map_madvise: 2991 * 2992 * This routine traverses a process's map, handling the madvise 2993 * system call. Advisories are classified as either those affecting 2994 * the vm_map_entry structure or those affecting the underlying 2995 * objects. 2996 */ 2997 int 2998 vm_map_madvise( 2999 vm_map_t map, 3000 vm_offset_t start, 3001 vm_offset_t end, 3002 int behav) 3003 { 3004 vm_map_entry_t entry, prev_entry; 3005 int rv; 3006 bool modify_map; 3007 3008 /* 3009 * Some madvise calls directly modify the vm_map_entry, in which case 3010 * we need to use an exclusive lock on the map and we need to perform 3011 * various clipping operations. Otherwise we only need a read-lock 3012 * on the map.
3013 */ 3014 switch(behav) { 3015 case MADV_NORMAL: 3016 case MADV_SEQUENTIAL: 3017 case MADV_RANDOM: 3018 case MADV_NOSYNC: 3019 case MADV_AUTOSYNC: 3020 case MADV_NOCORE: 3021 case MADV_CORE: 3022 if (start == end) 3023 return (0); 3024 modify_map = true; 3025 vm_map_lock(map); 3026 break; 3027 case MADV_WILLNEED: 3028 case MADV_DONTNEED: 3029 case MADV_FREE: 3030 if (start == end) 3031 return (0); 3032 modify_map = false; 3033 vm_map_lock_read(map); 3034 break; 3035 default: 3036 return (EINVAL); 3037 } 3038 3039 /* 3040 * Locate starting entry and clip if necessary. 3041 */ 3042 VM_MAP_RANGE_CHECK(map, start, end); 3043 3044 if (modify_map) { 3045 /* 3046 * madvise behaviors that are implemented in the vm_map_entry. 3047 * 3048 * We clip the vm_map_entry so that behavioral changes are 3049 * limited to the specified address range. 3050 */ 3051 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry); 3052 if (rv != KERN_SUCCESS) { 3053 vm_map_unlock(map); 3054 return (vm_mmap_to_errno(rv)); 3055 } 3056 3057 for (; entry->start < end; prev_entry = entry, 3058 entry = vm_map_entry_succ(entry)) { 3059 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 3060 continue; 3061 3062 rv = vm_map_clip_end(map, entry, end); 3063 if (rv != KERN_SUCCESS) { 3064 vm_map_unlock(map); 3065 return (vm_mmap_to_errno(rv)); 3066 } 3067 3068 switch (behav) { 3069 case MADV_NORMAL: 3070 vm_map_entry_set_behavior(entry, 3071 MAP_ENTRY_BEHAV_NORMAL); 3072 break; 3073 case MADV_SEQUENTIAL: 3074 vm_map_entry_set_behavior(entry, 3075 MAP_ENTRY_BEHAV_SEQUENTIAL); 3076 break; 3077 case MADV_RANDOM: 3078 vm_map_entry_set_behavior(entry, 3079 MAP_ENTRY_BEHAV_RANDOM); 3080 break; 3081 case MADV_NOSYNC: 3082 entry->eflags |= MAP_ENTRY_NOSYNC; 3083 break; 3084 case MADV_AUTOSYNC: 3085 entry->eflags &= ~MAP_ENTRY_NOSYNC; 3086 break; 3087 case MADV_NOCORE: 3088 entry->eflags |= MAP_ENTRY_NOCOREDUMP; 3089 break; 3090 case MADV_CORE: 3091 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; 3092 break; 3093 default: 3094 break; 3095 } 3096 vm_map_try_merge_entries(map, prev_entry, entry); 3097 } 3098 vm_map_try_merge_entries(map, prev_entry, entry); 3099 vm_map_unlock(map); 3100 } else { 3101 vm_pindex_t pstart, pend; 3102 3103 /* 3104 * madvise behaviors that are implemented in the underlying 3105 * vm_object. 3106 * 3107 * Since we don't clip the vm_map_entry, we have to clip 3108 * the vm_object pindex and count. 3109 */ 3110 if (!vm_map_lookup_entry(map, start, &entry)) 3111 entry = vm_map_entry_succ(entry); 3112 for (; entry->start < end; 3113 entry = vm_map_entry_succ(entry)) { 3114 vm_offset_t useEnd, useStart; 3115 3116 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | 3117 MAP_ENTRY_GUARD)) != 0) 3118 continue; 3119 3120 /* 3121 * MADV_FREE would otherwise rewind time to 3122 * the creation of the shadow object. Because 3123 * we hold the VM map read-locked, neither the 3124 * entry's object nor the presence of a 3125 * backing object can change. 
3126 */ 3127 if (behav == MADV_FREE && 3128 entry->object.vm_object != NULL && 3129 entry->object.vm_object->backing_object != NULL) 3130 continue; 3131 3132 pstart = OFF_TO_IDX(entry->offset); 3133 pend = pstart + atop(entry->end - entry->start); 3134 useStart = entry->start; 3135 useEnd = entry->end; 3136 3137 if (entry->start < start) { 3138 pstart += atop(start - entry->start); 3139 useStart = start; 3140 } 3141 if (entry->end > end) { 3142 pend -= atop(entry->end - end); 3143 useEnd = end; 3144 } 3145 3146 if (pstart >= pend) 3147 continue; 3148 3149 /* 3150 * Perform the pmap_advise() before clearing 3151 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 3152 * concurrent pmap operation, such as pmap_remove(), 3153 * could clear a reference in the pmap and set 3154 * PGA_REFERENCED on the page before the pmap_advise() 3155 * had completed. Consequently, the page would appear 3156 * referenced based upon an old reference that 3157 * occurred before this pmap_advise() ran. 3158 */ 3159 if (behav == MADV_DONTNEED || behav == MADV_FREE) 3160 pmap_advise(map->pmap, useStart, useEnd, 3161 behav); 3162 3163 vm_object_madvise(entry->object.vm_object, pstart, 3164 pend, behav); 3165 3166 /* 3167 * Pre-populate paging structures in the 3168 * WILLNEED case. For wired entries, the 3169 * paging structures are already populated. 3170 */ 3171 if (behav == MADV_WILLNEED && 3172 entry->wired_count == 0) { 3173 vm_map_pmap_enter(map, 3174 useStart, 3175 entry->protection, 3176 entry->object.vm_object, 3177 pstart, 3178 ptoa(pend - pstart), 3179 MAP_PREFAULT_MADVISE 3180 ); 3181 } 3182 } 3183 vm_map_unlock_read(map); 3184 } 3185 return (0); 3186 } 3187 3188 /* 3189 * vm_map_inherit: 3190 * 3191 * Sets the inheritance of the specified address 3192 * range in the target map. Inheritance 3193 * affects how the map will be shared with 3194 * child maps at the time of vmspace_fork. 
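 *
 *	For instance (hypothetical call), to have child maps receive a
 *	zero-filled region in place of the parent's pages:
 *
 *		rv = vm_map_inherit(map, start, end, VM_INHERIT_ZERO);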
3195 */ 3196 int 3197 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 3198 vm_inherit_t new_inheritance) 3199 { 3200 vm_map_entry_t entry, lentry, prev_entry, start_entry; 3201 int rv; 3202 3203 switch (new_inheritance) { 3204 case VM_INHERIT_NONE: 3205 case VM_INHERIT_COPY: 3206 case VM_INHERIT_SHARE: 3207 case VM_INHERIT_ZERO: 3208 break; 3209 default: 3210 return (KERN_INVALID_ARGUMENT); 3211 } 3212 if (start == end) 3213 return (KERN_SUCCESS); 3214 vm_map_lock(map); 3215 VM_MAP_RANGE_CHECK(map, start, end); 3216 rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry); 3217 if (rv != KERN_SUCCESS) 3218 goto unlock; 3219 if (vm_map_lookup_entry(map, end - 1, &lentry)) { 3220 rv = vm_map_clip_end(map, lentry, end); 3221 if (rv != KERN_SUCCESS) 3222 goto unlock; 3223 } 3224 if (new_inheritance == VM_INHERIT_COPY) { 3225 for (entry = start_entry; entry->start < end; 3226 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3227 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) 3228 != 0) { 3229 rv = KERN_INVALID_ARGUMENT; 3230 goto unlock; 3231 } 3232 } 3233 } 3234 for (entry = start_entry; entry->start < end; prev_entry = entry, 3235 entry = vm_map_entry_succ(entry)) { 3236 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", 3237 entry, (uintmax_t)entry->end, (uintmax_t)end)); 3238 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 3239 new_inheritance != VM_INHERIT_ZERO) 3240 entry->inheritance = new_inheritance; 3241 vm_map_try_merge_entries(map, prev_entry, entry); 3242 } 3243 vm_map_try_merge_entries(map, prev_entry, entry); 3244 unlock: 3245 vm_map_unlock(map); 3246 return (rv); 3247 } 3248 3249 /* 3250 * vm_map_entry_in_transition: 3251 * 3252 * Release the map lock, and sleep until the entry is no longer in 3253 * transition. Then wake up and reacquire the map lock. If the map changed 3254 * while another held the lock, look up a possibly-changed entry at or 3255 * after the 'start' position of the old entry. 3256 */ 3257 static vm_map_entry_t 3258 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start, 3259 vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry) 3260 { 3261 vm_map_entry_t entry; 3262 vm_offset_t start; 3263 u_int last_timestamp; 3264 3265 VM_MAP_ASSERT_LOCKED(map); 3266 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3267 ("not in-transition map entry %p", in_entry)); 3268 /* 3269 * We have not yet clipped the entry. 3270 */ 3271 start = MAX(in_start, in_entry->start); 3272 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3273 last_timestamp = map->timestamp; 3274 if (vm_map_unlock_and_wait(map, 0)) { 3275 /* 3276 * Allow interruption of user wiring/unwiring? 3277 */ 3278 } 3279 vm_map_lock(map); 3280 if (last_timestamp + 1 == map->timestamp) 3281 return (in_entry); 3282 3283 /* 3284 * Look again for the entry because the map was modified while it was 3285 * unlocked. Specifically, the entry may have been clipped, merged, or 3286 * deleted. 3287 */ 3288 if (!vm_map_lookup_entry(map, start, &entry)) { 3289 if (!holes_ok) { 3290 *io_end = start; 3291 return (NULL); 3292 } 3293 entry = vm_map_entry_succ(entry); 3294 } 3295 return (entry); 3296 } 3297 3298 /* 3299 * vm_map_unwire: 3300 * 3301 * Implements both kernel and user unwiring.
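 *
 *	A hypothetical example (illustrative only): undoing a user wiring
 *	while tolerating unmapped gaps in the range:
 *
 *		rv = vm_map_unwire(map, start, end,
 *		    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);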
3302 */ 3303 int 3304 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 3305 int flags) 3306 { 3307 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3308 int rv; 3309 bool holes_ok, need_wakeup, user_unwire; 3310 3311 if (start == end) 3312 return (KERN_SUCCESS); 3313 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3314 user_unwire = (flags & VM_MAP_WIRE_USER) != 0; 3315 vm_map_lock(map); 3316 VM_MAP_RANGE_CHECK(map, start, end); 3317 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3318 if (holes_ok) 3319 first_entry = vm_map_entry_succ(first_entry); 3320 else { 3321 vm_map_unlock(map); 3322 return (KERN_INVALID_ADDRESS); 3323 } 3324 } 3325 rv = KERN_SUCCESS; 3326 for (entry = first_entry; entry->start < end; entry = next_entry) { 3327 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3328 /* 3329 * We have not yet clipped the entry. 3330 */ 3331 next_entry = vm_map_entry_in_transition(map, start, 3332 &end, holes_ok, entry); 3333 if (next_entry == NULL) { 3334 if (entry == first_entry) { 3335 vm_map_unlock(map); 3336 return (KERN_INVALID_ADDRESS); 3337 } 3338 rv = KERN_INVALID_ADDRESS; 3339 break; 3340 } 3341 first_entry = (entry == first_entry) ? 3342 next_entry : NULL; 3343 continue; 3344 } 3345 rv = vm_map_clip_start(map, entry, start); 3346 if (rv != KERN_SUCCESS) 3347 break; 3348 rv = vm_map_clip_end(map, entry, end); 3349 if (rv != KERN_SUCCESS) 3350 break; 3351 3352 /* 3353 * Mark the entry in case the map lock is released. (See 3354 * above.) 3355 */ 3356 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3357 entry->wiring_thread == NULL, 3358 ("owned map entry %p", entry)); 3359 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3360 entry->wiring_thread = curthread; 3361 next_entry = vm_map_entry_succ(entry); 3362 /* 3363 * Check the map for holes in the specified region. 3364 * If holes_ok, skip this check. 3365 */ 3366 if (!holes_ok && 3367 entry->end < end && next_entry->start > entry->end) { 3368 end = entry->end; 3369 rv = KERN_INVALID_ADDRESS; 3370 break; 3371 } 3372 /* 3373 * If system unwiring, require that the entry is system wired. 3374 */ 3375 if (!user_unwire && 3376 vm_map_entry_system_wired_count(entry) == 0) { 3377 end = entry->end; 3378 rv = KERN_INVALID_ARGUMENT; 3379 break; 3380 } 3381 } 3382 need_wakeup = false; 3383 if (first_entry == NULL && 3384 !vm_map_lookup_entry(map, start, &first_entry)) { 3385 KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); 3386 prev_entry = first_entry; 3387 entry = vm_map_entry_succ(first_entry); 3388 } else { 3389 prev_entry = vm_map_entry_pred(first_entry); 3390 entry = first_entry; 3391 } 3392 for (; entry->start < end; 3393 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3394 /* 3395 * If holes_ok was specified, an empty 3396 * space in the unwired region could have been mapped 3397 * while the map lock was dropped for draining 3398 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 3399 * could be simultaneously wiring this new mapping 3400 * entry. Detect these cases and skip any entries 3401 * marked as in transition by us. 
3402 */ 3403 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3404 entry->wiring_thread != curthread) { 3405 KASSERT(holes_ok, 3406 ("vm_map_unwire: !HOLESOK and new/changed entry")); 3407 continue; 3408 } 3409 3410 if (rv == KERN_SUCCESS && (!user_unwire || 3411 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 3412 if (entry->wired_count == 1) 3413 vm_map_entry_unwire(map, entry); 3414 else 3415 entry->wired_count--; 3416 if (user_unwire) 3417 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3418 } 3419 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3420 ("vm_map_unwire: in-transition flag missing %p", entry)); 3421 KASSERT(entry->wiring_thread == curthread, 3422 ("vm_map_unwire: alien wire %p", entry)); 3423 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 3424 entry->wiring_thread = NULL; 3425 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3426 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3427 need_wakeup = true; 3428 } 3429 vm_map_try_merge_entries(map, prev_entry, entry); 3430 } 3431 vm_map_try_merge_entries(map, prev_entry, entry); 3432 vm_map_unlock(map); 3433 if (need_wakeup) 3434 vm_map_wakeup(map); 3435 return (rv); 3436 } 3437 3438 static void 3439 vm_map_wire_user_count_sub(u_long npages) 3440 { 3441 3442 atomic_subtract_long(&vm_user_wire_count, npages); 3443 } 3444 3445 static bool 3446 vm_map_wire_user_count_add(u_long npages) 3447 { 3448 u_long wired; 3449 3450 wired = vm_user_wire_count; 3451 do { 3452 if (npages + wired > vm_page_max_user_wired) 3453 return (false); 3454 } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired, 3455 npages + wired)); 3456 3457 return (true); 3458 } 3459 3460 /* 3461 * vm_map_wire_entry_failure: 3462 * 3463 * Handle a wiring failure on the given entry. 3464 * 3465 * The map should be locked. 3466 */ 3467 static void 3468 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 3469 vm_offset_t failed_addr) 3470 { 3471 3472 VM_MAP_ASSERT_LOCKED(map); 3473 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 3474 entry->wired_count == 1, 3475 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 3476 KASSERT(failed_addr < entry->end, 3477 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 3478 3479 /* 3480 * If any pages at the start of this entry were successfully wired, 3481 * then unwire them. 3482 */ 3483 if (failed_addr > entry->start) { 3484 pmap_unwire(map->pmap, entry->start, failed_addr); 3485 vm_object_unwire(entry->object.vm_object, entry->offset, 3486 failed_addr - entry->start, PQ_ACTIVE); 3487 } 3488 3489 /* 3490 * Assign an out-of-range value to represent the failure to wire this 3491 * entry. 3492 */ 3493 entry->wired_count = -1; 3494 } 3495 3496 int 3497 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3498 { 3499 int rv; 3500 3501 vm_map_lock(map); 3502 rv = vm_map_wire_locked(map, start, end, flags); 3503 vm_map_unlock(map); 3504 return (rv); 3505 } 3506 3507 /* 3508 * vm_map_wire_locked: 3509 * 3510 * Implements both kernel and user wiring. Returns with the map locked; 3511 * the map lock may be dropped and reacquired while the call runs.
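 *
 *	A minimal calling pattern (illustrative only), mirroring the
 *	vm_map_wire() wrapper above:
 *
 *		vm_map_lock(map);
 *		rv = vm_map_wire_locked(map, start, end, flags);
 *		vm_map_unlock(map);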
3512 */ 3513 int 3514 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3515 { 3516 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3517 vm_offset_t faddr, saved_end, saved_start; 3518 u_long incr, npages; 3519 u_int bidx, last_timestamp; 3520 int rv; 3521 bool holes_ok, need_wakeup, user_wire; 3522 vm_prot_t prot; 3523 3524 VM_MAP_ASSERT_LOCKED(map); 3525 3526 if (start == end) 3527 return (KERN_SUCCESS); 3528 prot = 0; 3529 if (flags & VM_MAP_WIRE_WRITE) 3530 prot |= VM_PROT_WRITE; 3531 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3532 user_wire = (flags & VM_MAP_WIRE_USER) != 0; 3533 VM_MAP_RANGE_CHECK(map, start, end); 3534 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3535 if (holes_ok) 3536 first_entry = vm_map_entry_succ(first_entry); 3537 else 3538 return (KERN_INVALID_ADDRESS); 3539 } 3540 for (entry = first_entry; entry->start < end; entry = next_entry) { 3541 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3542 /* 3543 * We have not yet clipped the entry. 3544 */ 3545 next_entry = vm_map_entry_in_transition(map, start, 3546 &end, holes_ok, entry); 3547 if (next_entry == NULL) { 3548 if (entry == first_entry) 3549 return (KERN_INVALID_ADDRESS); 3550 rv = KERN_INVALID_ADDRESS; 3551 goto done; 3552 } 3553 first_entry = (entry == first_entry) ? 3554 next_entry : NULL; 3555 continue; 3556 } 3557 rv = vm_map_clip_start(map, entry, start); 3558 if (rv != KERN_SUCCESS) 3559 goto done; 3560 rv = vm_map_clip_end(map, entry, end); 3561 if (rv != KERN_SUCCESS) 3562 goto done; 3563 3564 /* 3565 * Mark the entry in case the map lock is released. (See 3566 * above.) 3567 */ 3568 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3569 entry->wiring_thread == NULL, 3570 ("owned map entry %p", entry)); 3571 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3572 entry->wiring_thread = curthread; 3573 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3574 || (entry->protection & prot) != prot) { 3575 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3576 if (!holes_ok) { 3577 end = entry->end; 3578 rv = KERN_INVALID_ADDRESS; 3579 goto done; 3580 } 3581 } else if (entry->wired_count == 0) { 3582 entry->wired_count++; 3583 3584 npages = atop(entry->end - entry->start); 3585 if (user_wire && !vm_map_wire_user_count_add(npages)) { 3586 vm_map_wire_entry_failure(map, entry, 3587 entry->start); 3588 end = entry->end; 3589 rv = KERN_RESOURCE_SHORTAGE; 3590 goto done; 3591 } 3592 3593 /* 3594 * Release the map lock, relying on the in-transition 3595 * mark. Mark the map busy for fork. 3596 */ 3597 saved_start = entry->start; 3598 saved_end = entry->end; 3599 last_timestamp = map->timestamp; 3600 bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3601 incr = pagesizes[bidx]; 3602 vm_map_busy(map); 3603 vm_map_unlock(map); 3604 3605 for (faddr = saved_start; faddr < saved_end; 3606 faddr += incr) { 3607 /* 3608 * Simulate a fault to get the page and enter 3609 * it into the physical map. 3610 */ 3611 rv = vm_fault(map, faddr, VM_PROT_NONE, 3612 VM_FAULT_WIRE, NULL); 3613 if (rv != KERN_SUCCESS) 3614 break; 3615 } 3616 vm_map_lock(map); 3617 vm_map_unbusy(map); 3618 if (last_timestamp + 1 != map->timestamp) { 3619 /* 3620 * Look again for the entry because the map was 3621 * modified while it was unlocked. The entry 3622 * may have been clipped, but NOT merged or 3623 * deleted. 3624 */ 3625 if (!vm_map_lookup_entry(map, saved_start, 3626 &next_entry)) 3627 KASSERT(false, 3628 ("vm_map_wire: lookup failed")); 3629 first_entry = (entry == first_entry) ? 
3630 next_entry : NULL; 3631 for (entry = next_entry; entry->end < saved_end; 3632 entry = vm_map_entry_succ(entry)) { 3633 /* 3634 * In case of failure, handle entries 3635 * that were not fully wired here; 3636 * fully wired entries are handled 3637 * later. 3638 */ 3639 if (rv != KERN_SUCCESS && 3640 faddr < entry->end) 3641 vm_map_wire_entry_failure(map, 3642 entry, faddr); 3643 } 3644 } 3645 if (rv != KERN_SUCCESS) { 3646 vm_map_wire_entry_failure(map, entry, faddr); 3647 if (user_wire) 3648 vm_map_wire_user_count_sub(npages); 3649 end = entry->end; 3650 goto done; 3651 } 3652 } else if (!user_wire || 3653 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3654 entry->wired_count++; 3655 } 3656 /* 3657 * Check the map for holes in the specified region. 3658 * If holes_ok was specified, skip this check. 3659 */ 3660 next_entry = vm_map_entry_succ(entry); 3661 if (!holes_ok && 3662 entry->end < end && next_entry->start > entry->end) { 3663 end = entry->end; 3664 rv = KERN_INVALID_ADDRESS; 3665 goto done; 3666 } 3667 } 3668 rv = KERN_SUCCESS; 3669 done: 3670 need_wakeup = false; 3671 if (first_entry == NULL && 3672 !vm_map_lookup_entry(map, start, &first_entry)) { 3673 KASSERT(holes_ok, ("vm_map_wire: lookup failed")); 3674 prev_entry = first_entry; 3675 entry = vm_map_entry_succ(first_entry); 3676 } else { 3677 prev_entry = vm_map_entry_pred(first_entry); 3678 entry = first_entry; 3679 } 3680 for (; entry->start < end; 3681 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3682 /* 3683 * If holes_ok was specified, an empty 3684 * space in the unwired region could have been mapped 3685 * while the map lock was dropped for faulting in the 3686 * pages or draining MAP_ENTRY_IN_TRANSITION. 3687 * Moreover, another thread could be simultaneously 3688 * wiring this new mapping entry. Detect these cases 3689 * and skip any entries marked as in transition not by us. 3690 * 3691 * Another way to get an entry not marked with 3692 * MAP_ENTRY_IN_TRANSITION is after failed clipping, 3693 * which set rv to KERN_INVALID_ARGUMENT. 3694 */ 3695 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3696 entry->wiring_thread != curthread) { 3697 KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT, 3698 ("vm_map_wire: !HOLESOK and new/changed entry")); 3699 continue; 3700 } 3701 3702 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { 3703 /* do nothing */ 3704 } else if (rv == KERN_SUCCESS) { 3705 if (user_wire) 3706 entry->eflags |= MAP_ENTRY_USER_WIRED; 3707 } else if (entry->wired_count == -1) { 3708 /* 3709 * Wiring failed on this entry. Thus, unwiring is 3710 * unnecessary. 3711 */ 3712 entry->wired_count = 0; 3713 } else if (!user_wire || 3714 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3715 /* 3716 * Undo the wiring. Wiring succeeded on this entry 3717 * but failed on a later entry. 
3718 */ 3719 if (entry->wired_count == 1) { 3720 vm_map_entry_unwire(map, entry); 3721 if (user_wire) 3722 vm_map_wire_user_count_sub( 3723 atop(entry->end - entry->start)); 3724 } else 3725 entry->wired_count--; 3726 } 3727 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3728 ("vm_map_wire: in-transition flag missing %p", entry)); 3729 KASSERT(entry->wiring_thread == curthread, 3730 ("vm_map_wire: alien wire %p", entry)); 3731 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3732 MAP_ENTRY_WIRE_SKIPPED); 3733 entry->wiring_thread = NULL; 3734 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3735 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3736 need_wakeup = true; 3737 } 3738 vm_map_try_merge_entries(map, prev_entry, entry); 3739 } 3740 vm_map_try_merge_entries(map, prev_entry, entry); 3741 if (need_wakeup) 3742 vm_map_wakeup(map); 3743 return (rv); 3744 } 3745 3746 /* 3747 * vm_map_sync 3748 * 3749 * Push any dirty cached pages in the address range to their pager. 3750 * If syncio is TRUE, dirty pages are written synchronously. 3751 * If invalidate is TRUE, any cached pages are freed as well. 3752 * 3753 * If the size of the region from start to end is zero, we are 3754 * supposed to flush all modified pages within the region containing 3755 * start. Unfortunately, a region can be split or coalesced with 3756 * neighboring regions, making it difficult to determine what the 3757 * original region was. Therefore, we approximate this requirement by 3758 * flushing the current region containing start. 3759 * 3760 * Returns an error if any part of the specified range is not mapped. 3761 */ 3762 int 3763 vm_map_sync( 3764 vm_map_t map, 3765 vm_offset_t start, 3766 vm_offset_t end, 3767 boolean_t syncio, 3768 boolean_t invalidate) 3769 { 3770 vm_map_entry_t entry, first_entry, next_entry; 3771 vm_size_t size; 3772 vm_object_t object; 3773 vm_ooffset_t offset; 3774 unsigned int last_timestamp; 3775 int bdry_idx; 3776 boolean_t failed; 3777 3778 vm_map_lock_read(map); 3779 VM_MAP_RANGE_CHECK(map, start, end); 3780 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3781 vm_map_unlock_read(map); 3782 return (KERN_INVALID_ADDRESS); 3783 } else if (start == end) { 3784 start = first_entry->start; 3785 end = first_entry->end; 3786 } 3787 3788 /* 3789 * Make a first pass to check for user-wired memory, holes, 3790 * and partial invalidation of largepage mappings. 3791 */ 3792 for (entry = first_entry; entry->start < end; entry = next_entry) { 3793 if (invalidate) { 3794 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { 3795 vm_map_unlock_read(map); 3796 return (KERN_INVALID_ARGUMENT); 3797 } 3798 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3799 if (bdry_idx != 0 && 3800 ((start & (pagesizes[bdry_idx] - 1)) != 0 || 3801 (end & (pagesizes[bdry_idx] - 1)) != 0)) { 3802 vm_map_unlock_read(map); 3803 return (KERN_INVALID_ARGUMENT); 3804 } 3805 } 3806 next_entry = vm_map_entry_succ(entry); 3807 if (end > entry->end && 3808 entry->end != next_entry->start) { 3809 vm_map_unlock_read(map); 3810 return (KERN_INVALID_ADDRESS); 3811 } 3812 } 3813 3814 if (invalidate) 3815 pmap_remove(map->pmap, start, end); 3816 failed = FALSE; 3817 3818 /* 3819 * Make a second pass, cleaning/uncaching pages from the indicated 3820 * objects as we go. 3821 */ 3822 for (entry = first_entry; entry->start < end;) { 3823 offset = entry->offset + (start - entry->start); 3824 size = (end <= entry->end ? 
end : entry->end) - start; 3825 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 3826 vm_map_t smap; 3827 vm_map_entry_t tentry; 3828 vm_size_t tsize; 3829 3830 smap = entry->object.sub_map; 3831 vm_map_lock_read(smap); 3832 (void) vm_map_lookup_entry(smap, offset, &tentry); 3833 tsize = tentry->end - offset; 3834 if (tsize < size) 3835 size = tsize; 3836 object = tentry->object.vm_object; 3837 offset = tentry->offset + (offset - tentry->start); 3838 vm_map_unlock_read(smap); 3839 } else { 3840 object = entry->object.vm_object; 3841 } 3842 vm_object_reference(object); 3843 last_timestamp = map->timestamp; 3844 vm_map_unlock_read(map); 3845 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 3846 failed = TRUE; 3847 start += size; 3848 vm_object_deallocate(object); 3849 vm_map_lock_read(map); 3850 if (last_timestamp == map->timestamp || 3851 !vm_map_lookup_entry(map, start, &entry)) 3852 entry = vm_map_entry_succ(entry); 3853 } 3854 3855 vm_map_unlock_read(map); 3856 return (failed ? KERN_FAILURE : KERN_SUCCESS); 3857 } 3858 3859 /* 3860 * vm_map_entry_unwire: [ internal use only ] 3861 * 3862 * Make the region specified by this entry pageable. 3863 * 3864 * The map in question should be locked. 3865 * [This is the reason for this routine's existence.] 3866 */ 3867 static void 3868 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3869 { 3870 vm_size_t size; 3871 3872 VM_MAP_ASSERT_LOCKED(map); 3873 KASSERT(entry->wired_count > 0, 3874 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 3875 3876 size = entry->end - entry->start; 3877 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) 3878 vm_map_wire_user_count_sub(atop(size)); 3879 pmap_unwire(map->pmap, entry->start, entry->end); 3880 vm_object_unwire(entry->object.vm_object, entry->offset, size, 3881 PQ_ACTIVE); 3882 entry->wired_count = 0; 3883 } 3884 3885 static void 3886 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 3887 { 3888 3889 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3890 vm_object_deallocate(entry->object.vm_object); 3891 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3892 } 3893 3894 /* 3895 * vm_map_entry_delete: [ internal use only ] 3896 * 3897 * Deallocate the given entry from the target map. 
3898 */ 3899 static void 3900 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3901 { 3902 vm_object_t object; 3903 vm_pindex_t offidxstart, offidxend, size1; 3904 vm_size_t size; 3905 3906 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3907 object = entry->object.vm_object; 3908 3909 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3910 MPASS(entry->cred == NULL); 3911 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3912 MPASS(object == NULL); 3913 vm_map_entry_deallocate(entry, map->system_map); 3914 return; 3915 } 3916 3917 size = entry->end - entry->start; 3918 map->size -= size; 3919 3920 if (entry->cred != NULL) { 3921 swap_release_by_cred(size, entry->cred); 3922 crfree(entry->cred); 3923 } 3924 3925 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { 3926 entry->object.vm_object = NULL; 3927 } else if ((object->flags & OBJ_ANON) != 0 || 3928 object == kernel_object) { 3929 KASSERT(entry->cred == NULL || object->cred == NULL || 3930 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3931 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3932 offidxstart = OFF_TO_IDX(entry->offset); 3933 offidxend = offidxstart + atop(size); 3934 VM_OBJECT_WLOCK(object); 3935 if (object->ref_count != 1 && 3936 ((object->flags & OBJ_ONEMAPPING) != 0 || 3937 object == kernel_object)) { 3938 vm_object_collapse(object); 3939 3940 /* 3941 * The option OBJPR_NOTMAPPED can be passed here 3942 * because vm_map_delete() already performed 3943 * pmap_remove() on the only mapping to this range 3944 * of pages. 3945 */ 3946 vm_object_page_remove(object, offidxstart, offidxend, 3947 OBJPR_NOTMAPPED); 3948 if (offidxend >= object->size && 3949 offidxstart < object->size) { 3950 size1 = object->size; 3951 object->size = offidxstart; 3952 if (object->cred != NULL) { 3953 size1 -= object->size; 3954 KASSERT(object->charge >= ptoa(size1), 3955 ("object %p charge < 0", object)); 3956 swap_release_by_cred(ptoa(size1), 3957 object->cred); 3958 object->charge -= ptoa(size1); 3959 } 3960 } 3961 } 3962 VM_OBJECT_WUNLOCK(object); 3963 } 3964 if (map->system_map) 3965 vm_map_entry_deallocate(entry, TRUE); 3966 else { 3967 entry->defer_next = curthread->td_map_def_user; 3968 curthread->td_map_def_user = entry; 3969 } 3970 } 3971 3972 /* 3973 * vm_map_delete: [ internal use only ] 3974 * 3975 * Deallocates the given address range from the target 3976 * map. 3977 */ 3978 int 3979 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3980 { 3981 vm_map_entry_t entry, next_entry, scratch_entry; 3982 int rv; 3983 3984 VM_MAP_ASSERT_LOCKED(map); 3985 3986 if (start == end) 3987 return (KERN_SUCCESS); 3988 3989 /* 3990 * Find the start of the region, and clip it. 3991 * Step through all entries in this region. 3992 */ 3993 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry); 3994 if (rv != KERN_SUCCESS) 3995 return (rv); 3996 for (; entry->start < end; entry = next_entry) { 3997 /* 3998 * Wait for wiring or unwiring of an entry to complete. 3999 * Also wait for any system wirings to disappear on 4000 * user maps. 
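	 * The entry is flagged MAP_ENTRY_NEEDS_WAKEUP and the map lock
	 * is dropped in vm_map_unlock_and_wait(); the timestamp
	 * comparison below detects whether the map was modified while it
	 * was unlocked.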
4001 */ 4002 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 4003 (vm_map_pmap(map) != kernel_pmap && 4004 vm_map_entry_system_wired_count(entry) != 0)) { 4005 unsigned int last_timestamp; 4006 vm_offset_t saved_start; 4007 4008 saved_start = entry->start; 4009 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4010 last_timestamp = map->timestamp; 4011 (void) vm_map_unlock_and_wait(map, 0); 4012 vm_map_lock(map); 4013 if (last_timestamp + 1 != map->timestamp) { 4014 /* 4015 * Look again for the entry because the map was 4016 * modified while it was unlocked. 4017 * Specifically, the entry may have been 4018 * clipped, merged, or deleted. 4019 */ 4020 rv = vm_map_lookup_clip_start(map, saved_start, 4021 &next_entry, &scratch_entry); 4022 if (rv != KERN_SUCCESS) 4023 break; 4024 } else 4025 next_entry = entry; 4026 continue; 4027 } 4028 4029 /* XXXKIB or delete to the upper superpage boundary ? */ 4030 rv = vm_map_clip_end(map, entry, end); 4031 if (rv != KERN_SUCCESS) 4032 break; 4033 next_entry = vm_map_entry_succ(entry); 4034 4035 /* 4036 * Unwire before removing addresses from the pmap; otherwise, 4037 * unwiring will put the entries back in the pmap. 4038 */ 4039 if (entry->wired_count != 0) 4040 vm_map_entry_unwire(map, entry); 4041 4042 /* 4043 * Remove mappings for the pages, but only if the 4044 * mappings could exist. For instance, it does not 4045 * make sense to call pmap_remove() for guard entries. 4046 */ 4047 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 4048 entry->object.vm_object != NULL) 4049 pmap_map_delete(map->pmap, entry->start, entry->end); 4050 4051 /* 4052 * Delete the entry only after removing all pmap 4053 * entries pointing to its pages. (Otherwise, its 4054 * page frames may be reallocated, and any modify bits 4055 * will be set in the wrong object!) 4056 */ 4057 vm_map_entry_delete(map, entry); 4058 } 4059 return (rv); 4060 } 4061 4062 /* 4063 * vm_map_remove: 4064 * 4065 * Remove the given address range from the target map. 4066 * This is the exported form of vm_map_delete. 4067 */ 4068 int 4069 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 4070 { 4071 int result; 4072 4073 vm_map_lock(map); 4074 VM_MAP_RANGE_CHECK(map, start, end); 4075 result = vm_map_delete(map, start, end); 4076 vm_map_unlock(map); 4077 return (result); 4078 } 4079 4080 /* 4081 * vm_map_check_protection: 4082 * 4083 * Assert that the target map allows the specified privilege on the 4084 * entire address region given. The entire region must be allocated. 4085 * 4086 * WARNING! This code does not and should not check whether the 4087 * contents of the region is accessible. For example a smaller file 4088 * might be mapped into a larger address space. 4089 * 4090 * NOTE! This code is also called by munmap(). 4091 * 4092 * The map must be locked. A read lock is sufficient. 4093 */ 4094 boolean_t 4095 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 4096 vm_prot_t protection) 4097 { 4098 vm_map_entry_t entry; 4099 vm_map_entry_t tmp_entry; 4100 4101 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 4102 return (FALSE); 4103 entry = tmp_entry; 4104 4105 while (start < end) { 4106 /* 4107 * No holes allowed! 4108 */ 4109 if (start < entry->start) 4110 return (FALSE); 4111 /* 4112 * Check protection associated with entry. 
4113 */ 4114 if ((entry->protection & protection) != protection) 4115 return (FALSE); 4116 /* go to next entry */ 4117 start = entry->end; 4118 entry = vm_map_entry_succ(entry); 4119 } 4120 return (TRUE); 4121 } 4122 4123 /* 4124 * 4125 * vm_map_copy_swap_object: 4126 * 4127 * Copies a swap-backed object from an existing map entry to a 4128 * new one. Carries forward the swap charge. May change the 4129 * src object on return. 4130 */ 4131 static void 4132 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry, 4133 vm_offset_t size, vm_ooffset_t *fork_charge) 4134 { 4135 vm_object_t src_object; 4136 struct ucred *cred; 4137 int charged; 4138 4139 src_object = src_entry->object.vm_object; 4140 charged = ENTRY_CHARGED(src_entry); 4141 if ((src_object->flags & OBJ_ANON) != 0) { 4142 VM_OBJECT_WLOCK(src_object); 4143 vm_object_collapse(src_object); 4144 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { 4145 vm_object_split(src_entry); 4146 src_object = src_entry->object.vm_object; 4147 } 4148 vm_object_reference_locked(src_object); 4149 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 4150 VM_OBJECT_WUNLOCK(src_object); 4151 } else 4152 vm_object_reference(src_object); 4153 if (src_entry->cred != NULL && 4154 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4155 KASSERT(src_object->cred == NULL, 4156 ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p", 4157 src_object)); 4158 src_object->cred = src_entry->cred; 4159 src_object->charge = size; 4160 } 4161 dst_entry->object.vm_object = src_object; 4162 if (charged) { 4163 cred = curthread->td_ucred; 4164 crhold(cred); 4165 dst_entry->cred = cred; 4166 *fork_charge += size; 4167 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4168 crhold(cred); 4169 src_entry->cred = cred; 4170 *fork_charge += size; 4171 } 4172 } 4173 } 4174 4175 /* 4176 * vm_map_copy_entry: 4177 * 4178 * Copies the contents of the source entry to the destination 4179 * entry. The entries *must* be aligned properly. 4180 */ 4181 static void 4182 vm_map_copy_entry( 4183 vm_map_t src_map, 4184 vm_map_t dst_map, 4185 vm_map_entry_t src_entry, 4186 vm_map_entry_t dst_entry, 4187 vm_ooffset_t *fork_charge) 4188 { 4189 vm_object_t src_object; 4190 vm_map_entry_t fake_entry; 4191 vm_offset_t size; 4192 4193 VM_MAP_ASSERT_LOCKED(dst_map); 4194 4195 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 4196 return; 4197 4198 if (src_entry->wired_count == 0 || 4199 (src_entry->protection & VM_PROT_WRITE) == 0) { 4200 /* 4201 * If the source entry is marked needs_copy, it is already 4202 * write-protected. 4203 */ 4204 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 4205 (src_entry->protection & VM_PROT_WRITE) != 0) { 4206 pmap_protect(src_map->pmap, 4207 src_entry->start, 4208 src_entry->end, 4209 src_entry->protection & ~VM_PROT_WRITE); 4210 } 4211 4212 /* 4213 * Make a copy of the object. 4214 */ 4215 size = src_entry->end - src_entry->start; 4216 if ((src_object = src_entry->object.vm_object) != NULL) { 4217 if ((src_object->flags & OBJ_SWAP) != 0) { 4218 vm_map_copy_swap_object(src_entry, dst_entry, 4219 size, fork_charge); 4220 /* May have split/collapsed, reload obj. 
*/ 4221 src_object = src_entry->object.vm_object; 4222 } else { 4223 vm_object_reference(src_object); 4224 dst_entry->object.vm_object = src_object; 4225 } 4226 src_entry->eflags |= MAP_ENTRY_COW | 4227 MAP_ENTRY_NEEDS_COPY; 4228 dst_entry->eflags |= MAP_ENTRY_COW | 4229 MAP_ENTRY_NEEDS_COPY; 4230 dst_entry->offset = src_entry->offset; 4231 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { 4232 /* 4233 * MAP_ENTRY_WRITECNT cannot 4234 * indicate write reference from 4235 * src_entry, since the entry is 4236 * marked as needs copy. Allocate a 4237 * fake entry that is used to 4238 * decrement object->un_pager writecount 4239 * at the appropriate time. Attach 4240 * fake_entry to the deferred list. 4241 */ 4242 fake_entry = vm_map_entry_create(dst_map); 4243 fake_entry->eflags = MAP_ENTRY_WRITECNT; 4244 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; 4245 vm_object_reference(src_object); 4246 fake_entry->object.vm_object = src_object; 4247 fake_entry->start = src_entry->start; 4248 fake_entry->end = src_entry->end; 4249 fake_entry->defer_next = 4250 curthread->td_map_def_user; 4251 curthread->td_map_def_user = fake_entry; 4252 } 4253 4254 pmap_copy(dst_map->pmap, src_map->pmap, 4255 dst_entry->start, dst_entry->end - dst_entry->start, 4256 src_entry->start); 4257 } else { 4258 dst_entry->object.vm_object = NULL; 4259 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) 4260 dst_entry->offset = 0; 4261 if (src_entry->cred != NULL) { 4262 dst_entry->cred = curthread->td_ucred; 4263 crhold(dst_entry->cred); 4264 *fork_charge += size; 4265 } 4266 } 4267 } else { 4268 /* 4269 * We don't want to make writeable wired pages copy-on-write. 4270 * Immediately copy these pages into the new map by simulating 4271 * page faults. The new pages are pageable. 4272 */ 4273 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 4274 fork_charge); 4275 } 4276 } 4277 4278 /* 4279 * vmspace_map_entry_forked: 4280 * Update the newly-forked vmspace each time a map entry is inherited 4281 * or copied. The values for vm_dsize and vm_tsize are approximate 4282 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 4283 */ 4284 static void 4285 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 4286 vm_map_entry_t entry) 4287 { 4288 vm_size_t entrysize; 4289 vm_offset_t newend; 4290 4291 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 4292 return; 4293 entrysize = entry->end - entry->start; 4294 vm2->vm_map.size += entrysize; 4295 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 4296 vm2->vm_ssize += btoc(entrysize); 4297 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 4298 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 4299 newend = MIN(entry->end, 4300 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 4301 vm2->vm_dsize += btoc(newend - entry->start); 4302 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 4303 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 4304 newend = MIN(entry->end, 4305 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 4306 vm2->vm_tsize += btoc(newend - entry->start); 4307 } 4308 } 4309 4310 /* 4311 * vmspace_fork: 4312 * Create a new process vmspace structure and vm_map 4313 * based on those of an existing process. The new map 4314 * is based on the old map, according to the inheritance 4315 * values on the regions in that map. 4316 * 4317 * XXX It might be worth coalescing the entries added to the new vmspace. 4318 * 4319 * The source map must not be locked. 
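 *
 * An illustrative caller sketch (vmspace_unshare() below follows the
 * same shape; "cred" stands for whichever credential the caller
 * charges):
 *
 *	fork_charge = 0;
 *	vm2 = vmspace_fork(vm1, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 *	if (!swap_reserve_by_cred(fork_charge, cred)) {
 *		vmspace_free(vm2);
 *		return (ENOMEM);
 *	}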
4320 */ 4321 struct vmspace * 4322 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 4323 { 4324 struct vmspace *vm2; 4325 vm_map_t new_map, old_map; 4326 vm_map_entry_t new_entry, old_entry; 4327 vm_object_t object; 4328 int error, locked __diagused; 4329 vm_inherit_t inh; 4330 4331 old_map = &vm1->vm_map; 4332 /* Copy immutable fields of vm1 to vm2. */ 4333 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 4334 pmap_pinit); 4335 if (vm2 == NULL) 4336 return (NULL); 4337 4338 vm2->vm_taddr = vm1->vm_taddr; 4339 vm2->vm_daddr = vm1->vm_daddr; 4340 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 4341 vm2->vm_stacktop = vm1->vm_stacktop; 4342 vm2->vm_shp_base = vm1->vm_shp_base; 4343 vm_map_lock(old_map); 4344 if (old_map->busy) 4345 vm_map_wait_busy(old_map); 4346 new_map = &vm2->vm_map; 4347 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 4348 KASSERT(locked, ("vmspace_fork: lock failed")); 4349 4350 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); 4351 if (error != 0) { 4352 sx_xunlock(&old_map->lock); 4353 sx_xunlock(&new_map->lock); 4354 vm_map_process_deferred(); 4355 vmspace_free(vm2); 4356 return (NULL); 4357 } 4358 4359 new_map->anon_loc = old_map->anon_loc; 4360 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | 4361 MAP_ASLR_STACK | MAP_WXORX); 4362 4363 VM_MAP_ENTRY_FOREACH(old_entry, old_map) { 4364 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 4365 panic("vm_map_fork: encountered a submap"); 4366 4367 inh = old_entry->inheritance; 4368 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 4369 inh != VM_INHERIT_NONE) 4370 inh = VM_INHERIT_COPY; 4371 4372 switch (inh) { 4373 case VM_INHERIT_NONE: 4374 break; 4375 4376 case VM_INHERIT_SHARE: 4377 /* 4378 * Clone the entry, creating the shared object if 4379 * necessary. 4380 */ 4381 object = old_entry->object.vm_object; 4382 if (object == NULL) { 4383 vm_map_entry_back(old_entry); 4384 object = old_entry->object.vm_object; 4385 } 4386 4387 /* 4388 * Add the reference before calling vm_object_shadow 4389 * to insure that a shadow object is created. 4390 */ 4391 vm_object_reference(object); 4392 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4393 vm_object_shadow(&old_entry->object.vm_object, 4394 &old_entry->offset, 4395 old_entry->end - old_entry->start, 4396 old_entry->cred, 4397 /* Transfer the second reference too. */ 4398 true); 4399 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4400 old_entry->cred = NULL; 4401 4402 /* 4403 * As in vm_map_merged_neighbor_dispose(), 4404 * the vnode lock will not be acquired in 4405 * this call to vm_object_deallocate(). 4406 */ 4407 vm_object_deallocate(object); 4408 object = old_entry->object.vm_object; 4409 } else { 4410 VM_OBJECT_WLOCK(object); 4411 vm_object_clear_flag(object, OBJ_ONEMAPPING); 4412 if (old_entry->cred != NULL) { 4413 KASSERT(object->cred == NULL, 4414 ("vmspace_fork both cred")); 4415 object->cred = old_entry->cred; 4416 object->charge = old_entry->end - 4417 old_entry->start; 4418 old_entry->cred = NULL; 4419 } 4420 4421 /* 4422 * Assert the correct state of the vnode 4423 * v_writecount while the object is locked, to 4424 * not relock it later for the assertion 4425 * correctness. 4426 */ 4427 if (old_entry->eflags & MAP_ENTRY_WRITECNT && 4428 object->type == OBJT_VNODE) { 4429 KASSERT(((struct vnode *)object-> 4430 handle)->v_writecount > 0, 4431 ("vmspace_fork: v_writecount %p", 4432 object)); 4433 KASSERT(object->un_pager.vnp. 
4434 writemappings > 0, 4435 ("vmspace_fork: vnp.writecount %p", 4436 object)); 4437 } 4438 VM_OBJECT_WUNLOCK(object); 4439 } 4440 4441 /* 4442 * Clone the entry, referencing the shared object. 4443 */ 4444 new_entry = vm_map_entry_create(new_map); 4445 *new_entry = *old_entry; 4446 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4447 MAP_ENTRY_IN_TRANSITION); 4448 new_entry->wiring_thread = NULL; 4449 new_entry->wired_count = 0; 4450 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { 4451 vm_pager_update_writecount(object, 4452 new_entry->start, new_entry->end); 4453 } 4454 vm_map_entry_set_vnode_text(new_entry, true); 4455 4456 /* 4457 * Insert the entry into the new map -- we know we're 4458 * inserting at the end of the new map. 4459 */ 4460 vm_map_entry_link(new_map, new_entry); 4461 vmspace_map_entry_forked(vm1, vm2, new_entry); 4462 4463 /* 4464 * Update the physical map 4465 */ 4466 pmap_copy(new_map->pmap, old_map->pmap, 4467 new_entry->start, 4468 (old_entry->end - old_entry->start), 4469 old_entry->start); 4470 break; 4471 4472 case VM_INHERIT_COPY: 4473 /* 4474 * Clone the entry and link into the map. 4475 */ 4476 new_entry = vm_map_entry_create(new_map); 4477 *new_entry = *old_entry; 4478 /* 4479 * Copied entry is COW over the old object. 4480 */ 4481 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4482 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT); 4483 new_entry->wiring_thread = NULL; 4484 new_entry->wired_count = 0; 4485 new_entry->object.vm_object = NULL; 4486 new_entry->cred = NULL; 4487 vm_map_entry_link(new_map, new_entry); 4488 vmspace_map_entry_forked(vm1, vm2, new_entry); 4489 vm_map_copy_entry(old_map, new_map, old_entry, 4490 new_entry, fork_charge); 4491 vm_map_entry_set_vnode_text(new_entry, true); 4492 break; 4493 4494 case VM_INHERIT_ZERO: 4495 /* 4496 * Create a new anonymous mapping entry modelled from 4497 * the old one. 4498 */ 4499 new_entry = vm_map_entry_create(new_map); 4500 memset(new_entry, 0, sizeof(*new_entry)); 4501 4502 new_entry->start = old_entry->start; 4503 new_entry->end = old_entry->end; 4504 new_entry->eflags = old_entry->eflags & 4505 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 4506 MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC | 4507 MAP_ENTRY_SPLIT_BOUNDARY_MASK); 4508 new_entry->protection = old_entry->protection; 4509 new_entry->max_protection = old_entry->max_protection; 4510 new_entry->inheritance = VM_INHERIT_ZERO; 4511 4512 vm_map_entry_link(new_map, new_entry); 4513 vmspace_map_entry_forked(vm1, vm2, new_entry); 4514 4515 new_entry->cred = curthread->td_ucred; 4516 crhold(new_entry->cred); 4517 *fork_charge += (new_entry->end - new_entry->start); 4518 4519 break; 4520 } 4521 } 4522 /* 4523 * Use inlined vm_map_unlock() to postpone handling the deferred 4524 * map entries, which cannot be done until both old_map and 4525 * new_map locks are released. 4526 */ 4527 sx_xunlock(&old_map->lock); 4528 sx_xunlock(&new_map->lock); 4529 vm_map_process_deferred(); 4530 4531 return (vm2); 4532 } 4533 4534 /* 4535 * Create a process's stack for exec_new_vmspace(). This function is never 4536 * asked to wire the newly created stack. 4537 */ 4538 int 4539 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4540 vm_prot_t prot, vm_prot_t max, int cow) 4541 { 4542 vm_size_t growsize, init_ssize; 4543 rlim_t vmemlim; 4544 int rv; 4545 4546 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4547 growsize = sgrowsiz; 4548 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 4549 vm_map_lock(map); 4550 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4551 /* If we would blow our VMEM resource limit, no go */ 4552 if (map->size + init_ssize > vmemlim) { 4553 rv = KERN_NO_SPACE; 4554 goto out; 4555 } 4556 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 4557 max, cow); 4558 out: 4559 vm_map_unlock(map); 4560 return (rv); 4561 } 4562 4563 static int stack_guard_page = 1; 4564 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 4565 &stack_guard_page, 0, 4566 "Specifies the number of guard pages for a stack that grows"); 4567 4568 static int 4569 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4570 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 4571 { 4572 vm_map_entry_t gap_entry, new_entry, prev_entry; 4573 vm_offset_t bot, gap_bot, gap_top, top; 4574 vm_size_t init_ssize, sgp; 4575 int orient, rv; 4576 4577 /* 4578 * The stack orientation is piggybacked with the cow argument. 4579 * Extract it into orient and mask the cow argument so that we 4580 * don't pass it around further. 4581 */ 4582 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 4583 KASSERT(orient != 0, ("No stack grow direction")); 4584 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 4585 ("bi-dir stack")); 4586 4587 if (max_ssize == 0 || 4588 !vm_map_range_valid(map, addrbos, addrbos + max_ssize)) 4589 return (KERN_INVALID_ADDRESS); 4590 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4591 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4592 (vm_size_t)stack_guard_page * PAGE_SIZE; 4593 if (sgp >= max_ssize) 4594 return (KERN_INVALID_ARGUMENT); 4595 4596 init_ssize = growsize; 4597 if (max_ssize < init_ssize + sgp) 4598 init_ssize = max_ssize - sgp; 4599 4600 /* If addr is already mapped, no go */ 4601 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 4602 return (KERN_NO_SPACE); 4603 4604 /* 4605 * If we can't accommodate max_ssize in the current mapping, no go. 4606 */ 4607 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) 4608 return (KERN_NO_SPACE); 4609 4610 /* 4611 * We initially map a stack of only init_ssize. We will grow as 4612 * needed later. Depending on the orientation of the stack (i.e. 4613 * the grow direction) we either map at the top of the range, the 4614 * bottom of the range or in the middle. 4615 * 4616 * Note: we would normally expect prot and max to be VM_PROT_ALL, 4617 * and cow to be 0. Possibly we should eliminate these as input 4618 * parameters, and just pass these values here in the insert call. 
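 *
 * For example, with addrbos = A, max_ssize = M and init_ssize = I, a
 * downward-growing stack gets its initial entry at [A + M - I, A + M)
 * and its guard gap at [A, A + M - I); an upward-growing stack gets
 * its entry at [A, A + I) and its gap at [A + I, A + M).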
4619 */ 4620 if (orient == MAP_STACK_GROWS_DOWN) { 4621 bot = addrbos + max_ssize - init_ssize; 4622 top = bot + init_ssize; 4623 gap_bot = addrbos; 4624 gap_top = bot; 4625 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 4626 bot = addrbos; 4627 top = bot + init_ssize; 4628 gap_bot = top; 4629 gap_top = addrbos + max_ssize; 4630 } 4631 rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow, 4632 &new_entry); 4633 if (rv != KERN_SUCCESS) 4634 return (rv); 4635 KASSERT(new_entry->end == top || new_entry->start == bot, 4636 ("Bad entry start/end for new stack entry")); 4637 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 4638 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 4639 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 4640 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 4641 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 4642 ("new entry lacks MAP_ENTRY_GROWS_UP")); 4643 if (gap_bot == gap_top) 4644 return (KERN_SUCCESS); 4645 rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 4646 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 4647 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry); 4648 if (rv == KERN_SUCCESS) { 4649 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, 4650 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); 4651 KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4652 MAP_ENTRY_STACK_GAP_UP)) != 0, 4653 ("entry %p not stack gap %#x", gap_entry, 4654 gap_entry->eflags)); 4655 4656 /* 4657 * Gap can never successfully handle a fault, so 4658 * read-ahead logic is never used for it. Re-use 4659 * next_read of the gap entry to store 4660 * stack_guard_page for vm_map_growstack(). 4661 * Similarly, since a gap cannot have a backing object, 4662 * store the original stack protections in the 4663 * object offset. 4664 */ 4665 gap_entry->next_read = sgp; 4666 gap_entry->offset = prot | PROT_MAX(max); 4667 } else { 4668 (void)vm_map_delete(map, bot, top); 4669 } 4670 return (rv); 4671 } 4672 4673 /* 4674 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 4675 * successfully grow the stack. 4676 */ 4677 static int 4678 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 4679 { 4680 vm_map_entry_t stack_entry; 4681 struct proc *p; 4682 struct vmspace *vm; 4683 struct ucred *cred; 4684 vm_offset_t gap_end, gap_start, grow_start; 4685 vm_size_t grow_amount, guard, max_grow, sgp; 4686 vm_prot_t prot, max; 4687 rlim_t lmemlim, stacklim, vmemlim; 4688 int rv, rv1 __diagused; 4689 bool gap_deleted, grow_down, is_procstack; 4690 #ifdef notyet 4691 uint64_t limit; 4692 #endif 4693 #ifdef RACCT 4694 int error __diagused; 4695 #endif 4696 4697 p = curproc; 4698 vm = p->p_vmspace; 4699 4700 /* 4701 * Disallow stack growth when the access is performed by a 4702 * debugger or AIO daemon. The reason is that the wrong 4703 * resource limits are applied. 4704 */ 4705 if (p != initproc && (map != &p->p_vmspace->vm_map || 4706 p->p_textvp == NULL)) 4707 return (KERN_FAILURE); 4708 4709 MPASS(!map->system_map); 4710 4711 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 4712 stacklim = lim_cur(curthread, RLIMIT_STACK); 4713 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4714 retry: 4715 /* If addr is not in a hole for a stack grow area, no need to grow. 
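	 * Within this file the caller is vm_map_lookup(), which passes
	 * the guard entry marked as a stack gap that it found for the
	 * faulting address; a NULL gap_entry is looked up from addr
	 * below.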
*/ 4716 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 4717 return (KERN_FAILURE); 4718 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 4719 return (KERN_SUCCESS); 4720 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 4721 stack_entry = vm_map_entry_succ(gap_entry); 4722 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 4723 stack_entry->start != gap_entry->end) 4724 return (KERN_FAILURE); 4725 grow_amount = round_page(stack_entry->start - addr); 4726 grow_down = true; 4727 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 4728 stack_entry = vm_map_entry_pred(gap_entry); 4729 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 4730 stack_entry->end != gap_entry->start) 4731 return (KERN_FAILURE); 4732 grow_amount = round_page(addr + 1 - stack_entry->end); 4733 grow_down = false; 4734 } else { 4735 return (KERN_FAILURE); 4736 } 4737 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4738 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4739 gap_entry->next_read; 4740 max_grow = gap_entry->end - gap_entry->start; 4741 if (guard > max_grow) 4742 return (KERN_NO_SPACE); 4743 max_grow -= guard; 4744 if (grow_amount > max_grow) 4745 return (KERN_NO_SPACE); 4746 4747 /* 4748 * If this is the main process stack, see if we're over the stack 4749 * limit. 4750 */ 4751 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 4752 addr < (vm_offset_t)vm->vm_stacktop; 4753 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 4754 return (KERN_NO_SPACE); 4755 4756 #ifdef RACCT 4757 if (racct_enable) { 4758 PROC_LOCK(p); 4759 if (is_procstack && racct_set(p, RACCT_STACK, 4760 ctob(vm->vm_ssize) + grow_amount)) { 4761 PROC_UNLOCK(p); 4762 return (KERN_NO_SPACE); 4763 } 4764 PROC_UNLOCK(p); 4765 } 4766 #endif 4767 4768 grow_amount = roundup(grow_amount, sgrowsiz); 4769 if (grow_amount > max_grow) 4770 grow_amount = max_grow; 4771 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 4772 grow_amount = trunc_page((vm_size_t)stacklim) - 4773 ctob(vm->vm_ssize); 4774 } 4775 4776 #ifdef notyet 4777 PROC_LOCK(p); 4778 limit = racct_get_available(p, RACCT_STACK); 4779 PROC_UNLOCK(p); 4780 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 4781 grow_amount = limit - ctob(vm->vm_ssize); 4782 #endif 4783 4784 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 4785 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 4786 rv = KERN_NO_SPACE; 4787 goto out; 4788 } 4789 #ifdef RACCT 4790 if (racct_enable) { 4791 PROC_LOCK(p); 4792 if (racct_set(p, RACCT_MEMLOCK, 4793 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 4794 PROC_UNLOCK(p); 4795 rv = KERN_NO_SPACE; 4796 goto out; 4797 } 4798 PROC_UNLOCK(p); 4799 } 4800 #endif 4801 } 4802 4803 /* If we would blow our VMEM resource limit, no go */ 4804 if (map->size + grow_amount > vmemlim) { 4805 rv = KERN_NO_SPACE; 4806 goto out; 4807 } 4808 #ifdef RACCT 4809 if (racct_enable) { 4810 PROC_LOCK(p); 4811 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 4812 PROC_UNLOCK(p); 4813 rv = KERN_NO_SPACE; 4814 goto out; 4815 } 4816 PROC_UNLOCK(p); 4817 } 4818 #endif 4819 4820 if (vm_map_lock_upgrade(map)) { 4821 gap_entry = NULL; 4822 vm_map_lock_read(map); 4823 goto retry; 4824 } 4825 4826 if (grow_down) { 4827 /* 4828 * The gap_entry "offset" field is overloaded. See 4829 * vm_map_stack_locked(). 
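		 * It holds the original stack protections encoded with
		 * PROT_MAX(); next_read holds the guard size.  Both are
		 * decoded just below.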
4830 */ 4831 prot = PROT_EXTRACT(gap_entry->offset); 4832 max = PROT_MAX_EXTRACT(gap_entry->offset); 4833 sgp = gap_entry->next_read; 4834 4835 grow_start = gap_entry->end - grow_amount; 4836 if (gap_entry->start + grow_amount == gap_entry->end) { 4837 gap_start = gap_entry->start; 4838 gap_end = gap_entry->end; 4839 vm_map_entry_delete(map, gap_entry); 4840 gap_deleted = true; 4841 } else { 4842 MPASS(gap_entry->start < gap_entry->end - grow_amount); 4843 vm_map_entry_resize(map, gap_entry, -grow_amount); 4844 gap_deleted = false; 4845 } 4846 rv = vm_map_insert(map, NULL, 0, grow_start, 4847 grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN); 4848 if (rv != KERN_SUCCESS) { 4849 if (gap_deleted) { 4850 rv1 = vm_map_insert1(map, NULL, 0, gap_start, 4851 gap_end, VM_PROT_NONE, VM_PROT_NONE, 4852 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN, 4853 &gap_entry); 4854 MPASS(rv1 == KERN_SUCCESS); 4855 gap_entry->next_read = sgp; 4856 gap_entry->offset = prot | PROT_MAX(max); 4857 } else 4858 vm_map_entry_resize(map, gap_entry, 4859 grow_amount); 4860 } 4861 } else { 4862 grow_start = stack_entry->end; 4863 cred = stack_entry->cred; 4864 if (cred == NULL && stack_entry->object.vm_object != NULL) 4865 cred = stack_entry->object.vm_object->cred; 4866 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 4867 rv = KERN_NO_SPACE; 4868 /* Grow the underlying object if applicable. */ 4869 else if (stack_entry->object.vm_object == NULL || 4870 vm_object_coalesce(stack_entry->object.vm_object, 4871 stack_entry->offset, 4872 (vm_size_t)(stack_entry->end - stack_entry->start), 4873 grow_amount, cred != NULL)) { 4874 if (gap_entry->start + grow_amount == gap_entry->end) { 4875 vm_map_entry_delete(map, gap_entry); 4876 vm_map_entry_resize(map, stack_entry, 4877 grow_amount); 4878 } else { 4879 gap_entry->start += grow_amount; 4880 stack_entry->end += grow_amount; 4881 } 4882 map->size += grow_amount; 4883 rv = KERN_SUCCESS; 4884 } else 4885 rv = KERN_FAILURE; 4886 } 4887 if (rv == KERN_SUCCESS && is_procstack) 4888 vm->vm_ssize += btoc(grow_amount); 4889 4890 /* 4891 * Heed the MAP_WIREFUTURE flag if it was set for this process. 4892 */ 4893 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { 4894 rv = vm_map_wire_locked(map, grow_start, 4895 grow_start + grow_amount, 4896 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 4897 } 4898 vm_map_lock_downgrade(map); 4899 4900 out: 4901 #ifdef RACCT 4902 if (racct_enable && rv != KERN_SUCCESS) { 4903 PROC_LOCK(p); 4904 error = racct_set(p, RACCT_VMEM, map->size); 4905 KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); 4906 if (!old_mlock) { 4907 error = racct_set(p, RACCT_MEMLOCK, 4908 ptoa(pmap_wired_count(map->pmap))); 4909 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed")); 4910 } 4911 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); 4912 KASSERT(error == 0, ("decreasing RACCT_STACK failed")); 4913 PROC_UNLOCK(p); 4914 } 4915 #endif 4916 4917 return (rv); 4918 } 4919 4920 /* 4921 * Unshare the specified VM space for exec. If other processes are 4922 * mapped to it, then create a new one. The new vmspace is null. 
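 *
 * A rough sketch of the intended use by the exec path (details are the
 * caller's responsibility): call vmspace_exec(), finish switching to
 * the new image, and then, once TDP_EXECVMSPC is seen on the thread,
 * drop the last reference to the old vmspace with vmspace_free().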
4923 */ 4924 int 4925 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 4926 { 4927 struct vmspace *oldvmspace = p->p_vmspace; 4928 struct vmspace *newvmspace; 4929 4930 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, 4931 ("vmspace_exec recursed")); 4932 newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit); 4933 if (newvmspace == NULL) 4934 return (ENOMEM); 4935 newvmspace->vm_swrss = oldvmspace->vm_swrss; 4936 /* 4937 * This code is written like this for prototype purposes. The 4938 * goal is to avoid running down the vmspace here, but let the 4939 * other process's that are still using the vmspace to finally 4940 * run it down. Even though there is little or no chance of blocking 4941 * here, it is a good idea to keep this form for future mods. 4942 */ 4943 PROC_VMSPACE_LOCK(p); 4944 p->p_vmspace = newvmspace; 4945 PROC_VMSPACE_UNLOCK(p); 4946 if (p == curthread->td_proc) 4947 pmap_activate(curthread); 4948 curthread->td_pflags |= TDP_EXECVMSPC; 4949 return (0); 4950 } 4951 4952 /* 4953 * Unshare the specified VM space for forcing COW. This 4954 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4955 */ 4956 int 4957 vmspace_unshare(struct proc *p) 4958 { 4959 struct vmspace *oldvmspace = p->p_vmspace; 4960 struct vmspace *newvmspace; 4961 vm_ooffset_t fork_charge; 4962 4963 /* 4964 * The caller is responsible for ensuring that the reference count 4965 * cannot concurrently transition 1 -> 2. 4966 */ 4967 if (refcount_load(&oldvmspace->vm_refcnt) == 1) 4968 return (0); 4969 fork_charge = 0; 4970 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4971 if (newvmspace == NULL) 4972 return (ENOMEM); 4973 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4974 vmspace_free(newvmspace); 4975 return (ENOMEM); 4976 } 4977 PROC_VMSPACE_LOCK(p); 4978 p->p_vmspace = newvmspace; 4979 PROC_VMSPACE_UNLOCK(p); 4980 if (p == curthread->td_proc) 4981 pmap_activate(curthread); 4982 vmspace_free(oldvmspace); 4983 return (0); 4984 } 4985 4986 /* 4987 * vm_map_lookup: 4988 * 4989 * Finds the VM object, offset, and 4990 * protection for a given virtual address in the 4991 * specified map, assuming a page fault of the 4992 * type specified. 4993 * 4994 * Leaves the map in question locked for read; return 4995 * values are guaranteed until a vm_map_lookup_done 4996 * call is performed. Note that the map argument 4997 * is in/out; the returned map must be used in 4998 * the call to vm_map_lookup_done. 4999 * 5000 * A handle (out_entry) is returned for use in 5001 * vm_map_lookup_done, to make that fast. 5002 * 5003 * If a lookup is requested with "write protection" 5004 * specified, the map may be changed to perform virtual 5005 * copying operations, although the data referenced will 5006 * remain the same. 5007 */ 5008 int 5009 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 5010 vm_offset_t vaddr, 5011 vm_prot_t fault_typea, 5012 vm_map_entry_t *out_entry, /* OUT */ 5013 vm_object_t *object, /* OUT */ 5014 vm_pindex_t *pindex, /* OUT */ 5015 vm_prot_t *out_prot, /* OUT */ 5016 boolean_t *wired) /* OUT */ 5017 { 5018 vm_map_entry_t entry; 5019 vm_map_t map = *var_map; 5020 vm_prot_t prot; 5021 vm_prot_t fault_type; 5022 vm_object_t eobject; 5023 vm_size_t size; 5024 struct ucred *cred; 5025 5026 RetryLookup: 5027 5028 vm_map_lock_read(map); 5029 5030 RetryLookupLocked: 5031 /* 5032 * Lookup the faulting address. 
5033 */ 5034 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 5035 vm_map_unlock_read(map); 5036 return (KERN_INVALID_ADDRESS); 5037 } 5038 5039 entry = *out_entry; 5040 5041 /* 5042 * Handle submaps. 5043 */ 5044 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5045 vm_map_t old_map = map; 5046 5047 *var_map = map = entry->object.sub_map; 5048 vm_map_unlock_read(old_map); 5049 goto RetryLookup; 5050 } 5051 5052 /* 5053 * Check whether this task is allowed to have this page. 5054 */ 5055 prot = entry->protection; 5056 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 5057 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 5058 if (prot == VM_PROT_NONE && map != kernel_map && 5059 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 5060 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 5061 MAP_ENTRY_STACK_GAP_UP)) != 0 && 5062 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 5063 goto RetryLookupLocked; 5064 } 5065 fault_type = fault_typea & VM_PROT_ALL; 5066 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 5067 vm_map_unlock_read(map); 5068 return (KERN_PROTECTION_FAILURE); 5069 } 5070 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 5071 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 5072 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 5073 ("entry %p flags %x", entry, entry->eflags)); 5074 if ((fault_typea & VM_PROT_COPY) != 0 && 5075 (entry->max_protection & VM_PROT_WRITE) == 0 && 5076 (entry->eflags & MAP_ENTRY_COW) == 0) { 5077 vm_map_unlock_read(map); 5078 return (KERN_PROTECTION_FAILURE); 5079 } 5080 5081 /* 5082 * If this page is not pageable, we have to get it for all possible 5083 * accesses. 5084 */ 5085 *wired = (entry->wired_count != 0); 5086 if (*wired) 5087 fault_type = entry->protection; 5088 size = entry->end - entry->start; 5089 5090 /* 5091 * If the entry was copy-on-write, we either ... 5092 */ 5093 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5094 /* 5095 * If we want to write the page, we may as well handle that 5096 * now since we've got the map locked. 5097 * 5098 * If we don't need to write the page, we just demote the 5099 * permissions allowed. 5100 */ 5101 if ((fault_type & VM_PROT_WRITE) != 0 || 5102 (fault_typea & VM_PROT_COPY) != 0) { 5103 /* 5104 * Make a new object, and place it in the object 5105 * chain. Note that no new references have appeared 5106 * -- one just moved from the map to the new 5107 * object. 5108 */ 5109 if (vm_map_lock_upgrade(map)) 5110 goto RetryLookup; 5111 5112 if (entry->cred == NULL) { 5113 /* 5114 * The debugger owner is charged for 5115 * the memory. 5116 */ 5117 cred = curthread->td_ucred; 5118 crhold(cred); 5119 if (!swap_reserve_by_cred(size, cred)) { 5120 crfree(cred); 5121 vm_map_unlock(map); 5122 return (KERN_RESOURCE_SHORTAGE); 5123 } 5124 entry->cred = cred; 5125 } 5126 eobject = entry->object.vm_object; 5127 vm_object_shadow(&entry->object.vm_object, 5128 &entry->offset, size, entry->cred, false); 5129 if (eobject == entry->object.vm_object) { 5130 /* 5131 * The object was not shadowed. 5132 */ 5133 swap_release_by_cred(size, entry->cred); 5134 crfree(entry->cred); 5135 } 5136 entry->cred = NULL; 5137 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 5138 5139 vm_map_lock_downgrade(map); 5140 } else { 5141 /* 5142 * We're attempting to read a copy-on-write page -- 5143 * don't allow writes. 5144 */ 5145 prot &= ~VM_PROT_WRITE; 5146 } 5147 } 5148 5149 /* 5150 * Create an object if necessary. 
5151 */ 5152 if (entry->object.vm_object == NULL && !map->system_map) { 5153 if (vm_map_lock_upgrade(map)) 5154 goto RetryLookup; 5155 entry->object.vm_object = vm_object_allocate_anon(atop(size), 5156 NULL, entry->cred, size); 5157 entry->offset = 0; 5158 entry->cred = NULL; 5159 vm_map_lock_downgrade(map); 5160 } 5161 5162 /* 5163 * Return the object/offset from this entry. If the entry was 5164 * copy-on-write or empty, it has been fixed up. 5165 */ 5166 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5167 *object = entry->object.vm_object; 5168 5169 *out_prot = prot; 5170 return (KERN_SUCCESS); 5171 } 5172 5173 /* 5174 * vm_map_lookup_locked: 5175 * 5176 * Lookup the faulting address. A version of vm_map_lookup that returns 5177 * KERN_FAILURE instead of blocking on map lock or memory allocation. 5178 */ 5179 int 5180 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 5181 vm_offset_t vaddr, 5182 vm_prot_t fault_typea, 5183 vm_map_entry_t *out_entry, /* OUT */ 5184 vm_object_t *object, /* OUT */ 5185 vm_pindex_t *pindex, /* OUT */ 5186 vm_prot_t *out_prot, /* OUT */ 5187 boolean_t *wired) /* OUT */ 5188 { 5189 vm_map_entry_t entry; 5190 vm_map_t map = *var_map; 5191 vm_prot_t prot; 5192 vm_prot_t fault_type = fault_typea; 5193 5194 /* 5195 * Lookup the faulting address. 5196 */ 5197 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 5198 return (KERN_INVALID_ADDRESS); 5199 5200 entry = *out_entry; 5201 5202 /* 5203 * Fail if the entry refers to a submap. 5204 */ 5205 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 5206 return (KERN_FAILURE); 5207 5208 /* 5209 * Check whether this task is allowed to have this page. 5210 */ 5211 prot = entry->protection; 5212 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 5213 if ((fault_type & prot) != fault_type) 5214 return (KERN_PROTECTION_FAILURE); 5215 5216 /* 5217 * If this page is not pageable, we have to get it for all possible 5218 * accesses. 5219 */ 5220 *wired = (entry->wired_count != 0); 5221 if (*wired) 5222 fault_type = entry->protection; 5223 5224 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5225 /* 5226 * Fail if the entry was copy-on-write for a write fault. 5227 */ 5228 if (fault_type & VM_PROT_WRITE) 5229 return (KERN_FAILURE); 5230 /* 5231 * We're attempting to read a copy-on-write page -- 5232 * don't allow writes. 5233 */ 5234 prot &= ~VM_PROT_WRITE; 5235 } 5236 5237 /* 5238 * Fail if an object should be created. 5239 */ 5240 if (entry->object.vm_object == NULL && !map->system_map) 5241 return (KERN_FAILURE); 5242 5243 /* 5244 * Return the object/offset from this entry. If the entry was 5245 * copy-on-write or empty, it has been fixed up. 5246 */ 5247 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5248 *object = entry->object.vm_object; 5249 5250 *out_prot = prot; 5251 return (KERN_SUCCESS); 5252 } 5253 5254 /* 5255 * vm_map_lookup_done: 5256 * 5257 * Releases locks acquired by a vm_map_lookup 5258 * (according to the handle returned by that lookup). 
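 *
 * A minimal pairing sketch (the fault handler is the typical caller;
 * the local names here are illustrative):
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		(use object and pindex while the returned map remains
 *		read-locked)
 *		vm_map_lookup_done(map, entry);
 *	}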
5259 */ 5260 void 5261 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 5262 { 5263 /* 5264 * Unlock the main-level map 5265 */ 5266 vm_map_unlock_read(map); 5267 } 5268 5269 vm_offset_t 5270 vm_map_max_KBI(const struct vm_map *map) 5271 { 5272 5273 return (vm_map_max(map)); 5274 } 5275 5276 vm_offset_t 5277 vm_map_min_KBI(const struct vm_map *map) 5278 { 5279 5280 return (vm_map_min(map)); 5281 } 5282 5283 pmap_t 5284 vm_map_pmap_KBI(vm_map_t map) 5285 { 5286 5287 return (map->pmap); 5288 } 5289 5290 bool 5291 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end) 5292 { 5293 5294 return (vm_map_range_valid(map, start, end)); 5295 } 5296 5297 #ifdef INVARIANTS 5298 static void 5299 _vm_map_assert_consistent(vm_map_t map, int check) 5300 { 5301 vm_map_entry_t entry, prev; 5302 vm_map_entry_t cur, header, lbound, ubound; 5303 vm_size_t max_left, max_right; 5304 5305 #ifdef DIAGNOSTIC 5306 ++map->nupdates; 5307 #endif 5308 if (enable_vmmap_check != check) 5309 return; 5310 5311 header = prev = &map->header; 5312 VM_MAP_ENTRY_FOREACH(entry, map) { 5313 KASSERT(prev->end <= entry->start, 5314 ("map %p prev->end = %jx, start = %jx", map, 5315 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5316 KASSERT(entry->start < entry->end, 5317 ("map %p start = %jx, end = %jx", map, 5318 (uintmax_t)entry->start, (uintmax_t)entry->end)); 5319 KASSERT(entry->left == header || 5320 entry->left->start < entry->start, 5321 ("map %p left->start = %jx, start = %jx", map, 5322 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); 5323 KASSERT(entry->right == header || 5324 entry->start < entry->right->start, 5325 ("map %p start = %jx, right->start = %jx", map, 5326 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); 5327 cur = map->root; 5328 lbound = ubound = header; 5329 for (;;) { 5330 if (entry->start < cur->start) { 5331 ubound = cur; 5332 cur = cur->left; 5333 KASSERT(cur != lbound, 5334 ("map %p cannot find %jx", 5335 map, (uintmax_t)entry->start)); 5336 } else if (cur->end <= entry->start) { 5337 lbound = cur; 5338 cur = cur->right; 5339 KASSERT(cur != ubound, 5340 ("map %p cannot find %jx", 5341 map, (uintmax_t)entry->start)); 5342 } else { 5343 KASSERT(cur == entry, 5344 ("map %p cannot find %jx", 5345 map, (uintmax_t)entry->start)); 5346 break; 5347 } 5348 } 5349 max_left = vm_map_entry_max_free_left(entry, lbound); 5350 max_right = vm_map_entry_max_free_right(entry, ubound); 5351 KASSERT(entry->max_free == vm_size_max(max_left, max_right), 5352 ("map %p max = %jx, max_left = %jx, max_right = %jx", map, 5353 (uintmax_t)entry->max_free, 5354 (uintmax_t)max_left, (uintmax_t)max_right)); 5355 prev = entry; 5356 } 5357 KASSERT(prev->end <= entry->start, 5358 ("map %p prev->end = %jx, start = %jx", map, 5359 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5360 } 5361 #endif 5362 5363 #include "opt_ddb.h" 5364 #ifdef DDB 5365 #include <sys/kernel.h> 5366 5367 #include <ddb/ddb.h> 5368 5369 static void 5370 vm_map_print(vm_map_t map) 5371 { 5372 vm_map_entry_t entry, prev; 5373 5374 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 5375 (void *)map, 5376 (void *)map->pmap, map->nentries, map->timestamp); 5377 5378 db_indent += 2; 5379 prev = &map->header; 5380 VM_MAP_ENTRY_FOREACH(entry, map) { 5381 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 5382 (void *)entry, (void *)entry->start, (void *)entry->end, 5383 entry->eflags); 5384 { 5385 static const char * const inheritance_name[4] = 5386 {"share", "copy", "none", "donate_copy"}; 5387 5388 
db_iprintf(" prot=%x/%x/%s", 5389 entry->protection, 5390 entry->max_protection, 5391 inheritance_name[(int)(unsigned char) 5392 entry->inheritance]); 5393 if (entry->wired_count != 0) 5394 db_printf(", wired"); 5395 } 5396 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5397 db_printf(", share=%p, offset=0x%jx\n", 5398 (void *)entry->object.sub_map, 5399 (uintmax_t)entry->offset); 5400 if (prev == &map->header || 5401 prev->object.sub_map != 5402 entry->object.sub_map) { 5403 db_indent += 2; 5404 vm_map_print((vm_map_t)entry->object.sub_map); 5405 db_indent -= 2; 5406 } 5407 } else { 5408 if (entry->cred != NULL) 5409 db_printf(", ruid %d", entry->cred->cr_ruid); 5410 db_printf(", object=%p, offset=0x%jx", 5411 (void *)entry->object.vm_object, 5412 (uintmax_t)entry->offset); 5413 if (entry->object.vm_object && entry->object.vm_object->cred) 5414 db_printf(", obj ruid %d charge %jx", 5415 entry->object.vm_object->cred->cr_ruid, 5416 (uintmax_t)entry->object.vm_object->charge); 5417 if (entry->eflags & MAP_ENTRY_COW) 5418 db_printf(", copy (%s)", 5419 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 5420 db_printf("\n"); 5421 5422 if (prev == &map->header || 5423 prev->object.vm_object != 5424 entry->object.vm_object) { 5425 db_indent += 2; 5426 vm_object_print((db_expr_t)(intptr_t) 5427 entry->object.vm_object, 5428 0, 0, (char *)0); 5429 db_indent -= 2; 5430 } 5431 } 5432 prev = entry; 5433 } 5434 db_indent -= 2; 5435 } 5436 5437 DB_SHOW_COMMAND(map, map) 5438 { 5439 5440 if (!have_addr) { 5441 db_printf("usage: show map <addr>\n"); 5442 return; 5443 } 5444 vm_map_print((vm_map_t)addr); 5445 } 5446 5447 DB_SHOW_COMMAND(procvm, procvm) 5448 { 5449 struct proc *p; 5450 5451 if (have_addr) { 5452 p = db_lookup_proc(addr); 5453 } else { 5454 p = curproc; 5455 } 5456 5457 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 5458 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 5459 (void *)vmspace_pmap(p->p_vmspace)); 5460 5461 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 5462 } 5463 5464 #endif /* DDB */ 5465