/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/elf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a self-adjusting binary search tree of these
 * entries is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	CONTAINS_BITS(set, bits)	((~(set) & (bits)) == 0)

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
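
/*
 * Illustrative example (an added sketch, not part of the original code and
 * not compiled): CONTAINS_BITS() above reports whether every bit in "bits"
 * is also set in "set".  A typical use is checking that a requested
 * protection is a subset of a maximum protection:
 *
 *	if (!CONTAINS_BITS(max_prot, prot))
 *		return (KERN_PROTECTION_FAILURE);
 *
 * which is equivalent to the (prot & ~max) == 0 test asserted in
 * vm_map_insert1() below.
 */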

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 * VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}

#ifndef UMA_USE_DMAP

/*
 * Allocate a new slab for kernel map entries.  The kernel map may be locked
 * or unlocked, depending on whether the request is coming from the kernel
 * map or a submap.  This function allocates a virtual address range directly
 * from the kernel map instead of the kmem_* layer to avoid recursion on the
 * kernel map lock and also to avoid triggering allocator recursion in the
 * vmem boundary tag allocator.
 */
static void *
kmapent_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_offset_t addr;
	int error, locked;

	*pflag = UMA_SLAB_PRIV;

	if (!(locked = vm_map_locked(kernel_map)))
		vm_map_lock(kernel_map);
	addr = vm_map_findspace(kernel_map, vm_map_min(kernel_map), bytes);
	if (addr + bytes < addr || addr + bytes > vm_map_max(kernel_map))
		panic("%s: kernel map is exhausted", __func__);
	error = vm_map_insert(kernel_map, NULL, 0, addr, addr + bytes,
	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
	if (error != KERN_SUCCESS)
		panic("%s: vm_map_insert() failed: %d", __func__, error);
	if (!locked)
		vm_map_unlock(kernel_map);
	error = kmem_back_domain(domain, kernel_object, addr, bytes, M_NOWAIT |
	    M_USE_RESERVE | (wait & M_ZERO));
	if (error == KERN_SUCCESS) {
		return ((void *)addr);
	} else {
		if (!locked)
			vm_map_lock(kernel_map);
		/* Undo the insertion; vm_map_delete() takes [start, end). */
		vm_map_delete(kernel_map, addr, addr + bytes);
		if (!locked)
			vm_map_unlock(kernel_map);
		return (NULL);
	}
}

static void
kmapent_free(void *item, vm_size_t size, uint8_t pflag)
{
	vm_offset_t addr;
	int error __diagused;

	if ((pflag & UMA_SLAB_PRIV) == 0)
		/* XXX leaked */
		return;

	addr = (vm_offset_t)item;
	kmem_unback(kernel_object, addr, size);
	error = vm_map_remove(kernel_map, addr, addr + size);
	KASSERT(error == KERN_SUCCESS,
	    ("%s: vm_map_remove failed: %d", __func__, error));
}

/*
 * The worst-case upper bound on the number of kernel map entries that may be
 * created before the zone must be replenished in _vm_map_unlock().
 */
#define	KMAPENT_RESERVE		1

#endif /* !UMA_USE_DMAP */

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * User map and entry structures are allocated from the general purpose
 * memory pool.  Kernel maps are statically defined.  Kernel map entries
 * require special handling to avoid recursion; see the comments above
 * kmapent_alloc() and in vm_map_entry_create().
 */
void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);

	/*
	 * Disable the use of per-CPU buckets: map entry allocation is
	 * serialized by the kernel map lock.
	 */
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
#ifndef UMA_USE_DMAP
	/* Reserve an extra map entry for use when replenishing the reserve. */
	uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
	uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
	uma_zone_set_allocf(kmapentzone, kmapent_alloc);
	uma_zone_set_freef(kmapentzone, kmapent_free);
#endif

	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;
	vm_map_t map;

	vm = (struct vmspace *)mem;
	map = &vm->vm_map;

	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;
	KASSERT(vm->vm_map.nentries == 0,
	    ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
	KASSERT(vm->vm_map.size == 0,
	    ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	refcount_init(&vm->vm_refcnt, 1);
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (refcount_release(&vm->vm_refcnt))
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	struct vmspace *vm;
	struct proc *p;
	bool released;

	p = td->td_proc;
	vm = p->p_vmspace;

	/*
	 * Prepare to release the vmspace reference.  The thread that releases
	 * the last reference is responsible for tearing down the vmspace.
	 * However, threads not releasing the final reference must switch to
	 * the kernel's vmspace0 before the decrement so that the subsequent
	 * pmap deactivation does not modify a freed vmspace.
	 */
	refcount_acquire(&vmspace0.vm_refcnt);
	if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
		if (p->p_vmspace != &vmspace0) {
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		released = refcount_release(&vm->vm_refcnt);
	}
	if (released) {
		/*
		 * pmap_remove_pages() expects the pmap to be active, so switch
		 * back first if necessary.
		 */
		if (p->p_vmspace != vm) {
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The new vmspace is either the vmspace of a user process obtained
 * from an active AIO request or the initial vmspace of the AIO kernel
 * process (when it is idling).  Because user processes will block to
 * drain any active AIO requests before proceeding in exit() or
 * execve(), the reference count for vmspaces from AIO requests can
 * never be 0.  Similarly, AIO kernel processes hold an extra
 * reference on their initial vmspace for the life of the process.  As
 * a result, the 'newvm' vmspace always has a non-zero reference
 * count.  This permits an additional reference on 'newvm' to be
 * acquired via a simple atomic increment rather than the loop in
 * vmspace_acquire_ref() above.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	refcount_acquire(&newvm->vm_refcnt);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object;
	struct vnode *vp;
	bool vp_held;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	if ((object->flags & OBJ_ANON) != 0)
		object = object->handle;
	else
		KASSERT(object->backing_object == NULL,
		    ("non-anon object %p shadows", object));
	KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
	    entry, entry->object.vm_object));

	/*
	 * Mostly, we do not lock the backing object.  It is
	 * referenced by the entry we are processing, so it cannot go
	 * away.
	 */
	vm_pager_getvp(object, &vp, &vp_held);
	if (vp != NULL) {
		if (add) {
			VOP_SET_TEXT_CHECKED(vp);
		} else {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			VOP_UNSET_TEXT_CHECKED(vp);
			VOP_UNLOCK(vp);
		}
		if (vp_held)
			vdrop(vp);
	}
}

/*
 * Use a different name for this vm_map_entry field when its use is not
 * consistent with its use as part of an ordered search tree.
 */
#define	defer_next	right

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->defer_next;
		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vm_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_set_vnode_text(entry, false);
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)

enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
#ifdef DIAGNOSTIC
static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
#else
static int enable_vmmap_check = VMMAP_CHECK_NONE;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");

static void _vm_map_assert_consistent(vm_map_t map, int check);

#define	VM_MAP_ASSERT_CONSISTENT(map) \
    _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
#ifdef DIAGNOSTIC
#define	VM_MAP_UNLOCK_CONSISTENT(map) do {				\
	if (map->nupdates > map->nentries) {				\
		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);	\
		map->nupdates = 0;					\
	}								\
} while (0)
#else
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define	VM_MAP_ASSERT_CONSISTENT(map)
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif /* INVARIANTS */

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	if (map->system_map) {
#ifndef UMA_USE_DMAP
		if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
			uma_prealloc(kmapentzone, 1);
			map->flags &= ~MAP_REPLENISH;
		}
#endif
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}
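
/*
 * Illustrative sketch (an addition, not compiled) of how a caller might
 * combine the locking primitives above with the upgrade path documented
 * below.  The vm_map_lock_read()/vm_map_lock_upgrade()/vm_map_lock()/
 * vm_map_unlock() wrapper macros from vm_map.h are assumed here:
 *
 *	vm_map_lock_read(map);
 *	... inspect the map under the shared lock ...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		... upgrade failed and no lock is held; reacquire the
 *		    exclusive lock and revalidate before modifying ...
 *		vm_map_lock(map);
 *	}
 *	... modify the map under the exclusive lock ...
 *	vm_map_unlock(map);
 */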

/*
 * _vm_map_lock_upgrade:	[ internal use only ]
 *
 * Tries to upgrade a read (shared) lock on the specified map to a write
 * (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 * non-zero value if the upgrade fails.  If the upgrade fails, the map is
 * returned without a read or write lock held.
 *
 * Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		VM_MAP_UNLOCK_CONSISTENT(map);
		sx_downgrade_(&map->lock, file, line);
	}
}

/*
 * vm_map_locked:
 *
 * Returns a non-zero value if the caller holds a write (exclusive) lock
 * on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

/*
 * _vm_map_unlock_and_wait:
 *
 * Atomically releases the lock on the specified map and puts the calling
 * thread to sleep.  The calling thread will remain asleep until either
 * vm_map_wakeup() is performed on the map or the specified timeout is
 * exceeded.
 *
 * WARNING!  This function does not perform deferred deallocations of
 * objects and map entries.  Therefore, the calling thread is expected to
 * reacquire the map lock after reawakening and later perform an ordinary
 * unlock operation, such as vm_map_unlock(), before completing its
 * operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	mtx_lock(&map_sleep_mtx);
	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_xunlock_(&map->lock, file, line);
	}
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 * vm_map_wakeup:
 *
 * Awaken any threads that have slept on the map using
 * vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.eflags = MAP_ENTRY_HEADER;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->header.left = map->header.right = &map->header;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
	map->anon_loc = 0;
#ifdef DIAGNOSTIC
	map->nupdates = 0;
#endif
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
}

/*
 * vm_map_entry_dispose:	[ internal use only ]
 *
 * Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 * vm_map_entry_create:	[ internal use only ]
 *
 * Allocates a VM map entry for insertion.
 * No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

#ifndef UMA_USE_DMAP
	if (map == kernel_map) {
		VM_MAP_ASSERT_LOCKED(map);

		/*
		 * A new slab of kernel map entries cannot be allocated at this
		 * point because the kernel map has not yet been updated to
		 * reflect the caller's request.  Therefore, we allocate a new
		 * map entry, dipping into the reserve if necessary, and set a
		 * flag indicating that the reserve must be replenished before
		 * the map is unlocked.
		 */
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_NOVM);
		if (new_entry == NULL) {
			new_entry = uma_zalloc(kmapentzone,
			    M_NOWAIT | M_NOVM | M_USE_RESERVE);
			kernel_map->flags |= MAP_REPLENISH;
		}
	} else
#endif
	if (map->system_map) {
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	} else {
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	}
	KASSERT(new_entry != NULL,
	    ("vm_map_entry_create: kernel resources exhausted"));
	return (new_entry);
}
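
/*
 * Illustrative sketch (an addition, not compiled) of the sleep/wakeup
 * protocol built on _vm_map_unlock_and_wait() and vm_map_wakeup() above;
 * the vm_map_unlock_and_wait() wrapper macro from vm_map.h is assumed.
 * A waiter records map->timestamp and revalidates after relocking, since
 * the map may have changed while it slept:
 *
 *	last_timestamp = map->timestamp;
 *	(void)vm_map_unlock_and_wait(map, 0);
 *	vm_map_lock(map);
 *	if (last_timestamp != map->timestamp) {
 *		... look up the entry of interest again ...
 *	}
 */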

/*
 * vm_map_entry_set_behavior:
 *
 * Set the expected access behavior, either normal, random, or
 * sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 * vm_map_entry_max_free_{left,right}:
 *
 * Compute the size of the largest free gap between two entries,
 * one the root of a tree and the other the ancestor of that root
 * that is the least or greatest ancestor found on the search path.
 */
static inline vm_size_t
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{

	return (root->left != left_ancestor ?
	    root->left->max_free : root->start - left_ancestor->end);
}

static inline vm_size_t
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{

	return (root->right != right_ancestor ?
	    root->right->max_free : right_ancestor->start - root->end);
}

/*
 * vm_map_entry_{pred,succ}:
 *
 * Find the {predecessor, successor} of the entry by taking one step
 * in the appropriate direction and backtracking as much as necessary.
 * vm_map_entry_succ is defined in vm_map.h.
 */
static inline vm_map_entry_t
vm_map_entry_pred(vm_map_entry_t entry)
{
	vm_map_entry_t prior;

	prior = entry->left;
	if (prior->right->start < entry->start) {
		do
			prior = prior->right;
		while (prior->right != entry);
	}
	return (prior);
}

static inline vm_size_t
vm_size_max(vm_size_t a, vm_size_t b)
{

	return (a > b ? a : b);
}

#define	SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {		\
	vm_map_entry_t z;						\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->right->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look right to find it.				\
	 */								\
	y = root->left;							\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_left(root, llist))	\
		max_free = vm_map_entry_max_free_right(root, rlist);	\
	if (y != llist && (test)) {					\
		/* Rotate right and make y root. */			\
		z = y->right;						\
		if (z != root) {					\
			root->left = z;					\
			y->right = root;				\
			if (max_free < y->max_free)			\
				root->max_free = max_free =		\
				    vm_size_max(max_free, z->max_free);	\
		} else if (max_free < y->max_free)			\
			root->max_free = max_free =			\
			    vm_size_max(max_free, root->start - y->end);\
		root = y;						\
		y = root->left;						\
	}								\
	/* Copy right->max_free.  Put root on rlist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free not copied from right", __func__));		\
	root->left = rlist;						\
	rlist = root;							\
	root = y != llist ? y : NULL;					\
} while (0)

#define	SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {		\
	vm_map_entry_t z;						\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->left->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look left to find it.				\
	 */								\
	y = root->right;						\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))	\
		max_free = vm_map_entry_max_free_left(root, llist);	\
	if (y != rlist && (test)) {					\
		/* Rotate left and make y root. */			\
		z = y->left;						\
		if (z != root) {					\
			root->right = z;				\
			y->left = root;					\
			if (max_free < y->max_free)			\
				root->max_free = max_free =		\
				    vm_size_max(max_free, z->max_free);	\
		} else if (max_free < y->max_free)			\
			root->max_free = max_free =			\
			    vm_size_max(max_free, y->start - root->end);\
		root = y;						\
		y = root->right;					\
	}								\
	/* Copy left->max_free.  Put root on llist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free not copied from left", __func__));		\
	root->right = llist;						\
	llist = root;							\
	root = y != rlist ? y : NULL;					\
} while (0)

/*
 * Walk down the tree until we find addr or a gap where addr would go, breaking
 * off left and right subtrees of nodes less than, or greater than addr.  Treat
 * subtrees with root->max_free < length as empty trees.  llist and rlist are
 * the two sides in reverse order (bottom-up), with llist linked by the right
 * pointer and rlist linked by the left pointer in the vm_map_entry, and both
 * lists terminated by &map->header.  This function, and the subsequent call to
 * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
 * values in &map->header.
 */
static __always_inline vm_map_entry_t
vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
    vm_map_entry_t *llist, vm_map_entry_t *rlist)
{
	vm_map_entry_t left, right, root, y;

	left = right = &map->header;
	root = map->root;
	while (root != NULL && root->max_free >= length) {
		KASSERT(left->end <= root->start &&
		    root->end <= right->start,
		    ("%s: root not within tree bounds", __func__));
		if (addr < root->start) {
			SPLAY_LEFT_STEP(root, y, left, right,
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
			SPLAY_RIGHT_STEP(root, y, left, right,
			    y->max_free >= length && addr >= y->end);
		} else
			break;
	}
	*llist = left;
	*rlist = right;
	return (root);
}

static __always_inline void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
{
	vm_map_entry_t hi, right, y;

	right = *rlist;
	hi = root->right == right ? NULL : root->right;
	if (hi == NULL)
		return;
	do
		SPLAY_LEFT_STEP(hi, y, root, right, true);
	while (hi != NULL);
	*rlist = right;
}

static __always_inline void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
{
	vm_map_entry_t left, lo, y;

	left = *llist;
	lo = root->left == left ? NULL : root->left;
	if (lo == NULL)
		return;
	do
		SPLAY_RIGHT_STEP(lo, y, left, root, true);
	while (lo != NULL);
	*llist = left;
}

static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
	vm_map_entry_t tmp;

	tmp = *b;
	*b = *a;
	*a = tmp;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static vm_size_t
vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
{
	do {
		/*
		 * The max_free values of the children of llist are in
		 * llist->max_free and max_free.  Update with the
		 * max value.
		 */
		llist->max_free = max_free =
		    vm_size_max(llist->max_free, max_free);
		vm_map_entry_swap(&llist->right, &tail);
		vm_map_entry_swap(&tail, &llist);
	} while (llist != header);
	root->left = tail;
	return (max_free);
}

/*
 * When llist is known to be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = root->start - llist->end;
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root, max_free, llist);
	} else {
		root->left = header;
		header->right = root;
	}
	return (max_free);
}

/*
 * When llist may or may not be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_left(root, llist);
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root->left == llist ? root : root->left,
		    max_free, llist);
	}
	return (max_free);
}

static vm_size_t
vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
{
	do {
		/*
		 * The max_free values of the children of rlist are in
		 * rlist->max_free and max_free.  Update with the
		 * max value.
		 */
		rlist->max_free = max_free =
		    vm_size_max(rlist->max_free, max_free);
		vm_map_entry_swap(&rlist->left, &tail);
		vm_map_entry_swap(&tail, &rlist);
	} while (rlist != header);
	root->right = tail;
	return (max_free);
}

/*
 * When rlist is known to be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = rlist->start - root->end;
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root, max_free, rlist);
	} else {
		root->right = header;
		header->left = root;
	}
	return (max_free);
}
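
/*
 * Worked example (an added note): for the helpers above, suppose "root"
 * has no left child, so its left pointer is the thread to its predecessor,
 * and that predecessor ends at 0x4000 while root->start is 0x7000.  Then
 *
 *	vm_map_entry_max_free_left(root, pred) == 0x7000 - 0x4000 == 0x3000,
 *
 * the size of the gap between the two entries.  If root instead has a left
 * subtree, the value is root->left->max_free, the largest gap recorded in
 * that subtree.  The merge walks above propagate these values back up so
 * that every node's max_free remains the maximum over its subtree.
 */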

/*
 * When rlist may or may not be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_right(root, rlist);
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root->right == rlist ? root : root->right,
		    max_free, rlist);
	}
	return (max_free);
}

/*
 * vm_map_splay:
 *
 * The Sleator and Tarjan top-down splay algorithm with the
 * following variation.  Max_free must be computed bottom-up, so
 * on the downward pass, maintain the left and right spines in
 * reverse order.  Then, make a second pass up each side to fix
 * the pointers and compute max_free.  The time bound is O(log n)
 * amortized.
 *
 * The tree is threaded, which means that there are no null pointers.
 * When a node has no left child, its left pointer points to its
 * predecessor, which is the last ancestor on the search path from the
 * root where the search branched right.  Likewise, when a node has no
 * right child, its right pointer points to its successor.  The map
 * header node is the predecessor of the first map entry, and the
 * successor of the last.
 *
 * The new root is the vm_map_entry containing "addr", or else an
 * adjacent entry (lower if possible) if addr is not in the tree.
 *
 * The map must be locked, and leaves it so.
 *
 * Returns:	the new root.
 */
static vm_map_entry_t
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	header = &map->header;
	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
	if (root != NULL) {
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else if (llist != header) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
	} else if (rlist != header) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		max_free_left = vm_map_splay_merge_pred(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else {
		/* There is no root. */
		return (NULL);
	}
	root->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root);
}
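
/*
 * Illustrative sketch (an addition, not compiled): because the tree is
 * threaded and the header is both the predecessor of the first entry and
 * the successor of the last, all entries can be visited in address order
 * using vm_map_entry_succ() (defined in vm_map.h):
 *
 *	for (entry = vm_map_entry_succ(&map->header);
 *	    entry != &map->header;
 *	    entry = vm_map_entry_succ(entry)) {
 *		... entries are visited with increasing entry->start ...
 *	}
 */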

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps.  On linking, if new entry clips
 * existing entry, trim existing entry to avoid overlap, and manage
 * offsets.  On unlinking, merge disappearing entry with neighbor, if
 * called for, and manage offsets.  Callers should not modify fields in
 * entries already mapped.
 */
static void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	if (root == NULL) {
		/*
		 * The new entry does not overlap any existing entry in the
		 * map, so it becomes the new root of the map tree.
		 */
		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
	} else if (entry->start == root->start) {
		/*
		 * The new entry is a clone of root, with only the end field
		 * changed.  The root entry will be shrunk to abut the new
		 * entry, and will be the right child of the new root entry in
		 * the modified map.
		 */
		KASSERT(entry->end < root->end,
		    ("%s: clip_start not within entry", __func__));
		vm_map_splay_findprev(root, &llist);
		if ((root->eflags & (MAP_ENTRY_STACK_GAP_DN |
		    MAP_ENTRY_STACK_GAP_UP)) == 0)
			root->offset += entry->end - root->start;
		root->start = entry->end;
		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
		max_free_right = root->max_free = vm_size_max(
		    vm_map_splay_merge_pred(entry, root, entry),
		    vm_map_splay_merge_right(header, root, rlist));
	} else {
		/*
		 * The new entry is a clone of root, with only the start field
		 * changed.  The root entry will be shrunk to abut the new
		 * entry, and will be the left child of the new root entry in
		 * the modified map.
		 */
		KASSERT(entry->end == root->end,
		    ("%s: clip_start not within entry", __func__));
		vm_map_splay_findnext(root, &rlist);
		if ((entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
		    MAP_ENTRY_STACK_GAP_UP)) == 0)
			entry->offset += entry->start - root->start;
		root->end = entry->start;
		max_free_left = root->max_free = vm_size_max(
		    vm_map_splay_merge_left(header, root, llist),
		    vm_map_splay_merge_succ(entry, root, entry));
		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
	}
	entry->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = entry;
	VM_MAP_ASSERT_CONSISTENT(map);
}

enum unlink_merge_type {
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};

static void
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
    enum unlink_merge_type op)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	VM_MAP_ASSERT_LOCKED(map);
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	vm_map_splay_findprev(root, &llist);
	vm_map_splay_findnext(root, &rlist);
	if (op == UNLINK_MERGE_NEXT) {
		rlist->start = root->start;
		MPASS((rlist->eflags & (MAP_ENTRY_STACK_GAP_DN |
		    MAP_ENTRY_STACK_GAP_UP)) == 0);
		rlist->offset = root->offset;
	}
	if (llist != header) {
		root = llist;
		llist = root->right;
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
	} else if (rlist != header) {
		root = rlist;
		rlist = root->left;
		max_free_left = vm_map_splay_merge_pred(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else {
		header->left = header->right = header;
		root = NULL;
	}
	if (root != NULL)
		root->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 * vm_map_entry_resize:
 *
 * Resize a vm_map_entry, recompute the amount of free space that
 * follows it and propagate that value up the tree.
 *
 * The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
	vm_map_entry_t header, llist, rlist, root;

	VM_MAP_ASSERT_LOCKED(map);
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
	vm_map_splay_findnext(root, &rlist);
	entry->end += grow_amount;
	root->max_free = vm_size_max(
	    vm_map_splay_merge_left(header, root, llist),
	    vm_map_splay_merge_succ(header, root, rlist));
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
	    __func__, map, map->nentries, entry);
}
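
/*
 * Illustrative sketch (an addition, not compiled) of a typical caller of
 * vm_map_lookup_entry(), documented below.  With the map locked, the
 * returned entry either contains the address or immediately precedes it:
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... addr is not mapped; "entry" is the preceding entry,
 *		    or &map->header if there is none ...
 *	}
 */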

/*
 * vm_map_lookup_entry:	[ internal use only ]
 *
 * Finds the map entry containing (or
 * immediately preceding) the specified address
 * in the given map; the entry is returned
 * in the "entry" parameter.  The boolean
 * result indicates whether the address is
 * actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur, header, lbound, ubound;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	header = &map->header;
	cur = map->root;
	if (cur == NULL) {
		*entry = header;
		return (FALSE);
	}
	if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	}
	if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		cur = vm_map_splay(map, address);
		if (!locked) {
			VM_MAP_UNLOCK_CONSISTENT(map);
			sx_downgrade(&map->lock);
		}

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address < cur->start) {
			*entry = header;
			return (FALSE);
		}
		*entry = cur;
		return (address < cur->end);
	}
	/*
	 * Since the map is only locked for read access, perform a
	 * standard binary search tree lookup for "address".
	 */
	lbound = ubound = header;
	for (;;) {
		if (address < cur->start) {
			ubound = cur;
			cur = cur->left;
			if (cur == lbound)
				break;
		} else if (cur->end <= address) {
			lbound = cur;
			cur = cur->right;
			if (cur == ubound)
				break;
		} else {
			*entry = cur;
			return (TRUE);
		}
	}
	*entry = lbound;
	return (FALSE);
}

/*
 * vm_map_insert1() is identical to vm_map_insert() except that it
 * returns the newly inserted map entry in '*res'.  In case the new
 * entry is coalesced with a neighbor or an existing entry was
 * resized, that entry is returned.  In any case, the returned entry
 * covers the specified address range.
 */
static int
vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow,
    vm_map_entry_t *res)
{
	vm_map_entry_t new_entry, next_entry, prev_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;
	u_long bdry;
	u_int bidx;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 ||
	    (cow & MAP_SPLIT_BOUNDARY_MASK) != 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x",
	    object, cow));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start == end || !vm_map_range_valid(map, start, end))
		return (KERN_INVALID_ADDRESS);

	if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
	    VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
		return (KERN_PROTECTION_FAILURE);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	next_entry = vm_map_entry_succ(prev_entry);
	if (next_entry->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_WRITECOUNT)
		protoeflags |= MAP_ENTRY_WRITECNT;
	if (cow & MAP_VN_EXEC)
		protoeflags |= MAP_ENTRY_VN_EXEC;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;
	if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) {
		/* This magically ignores index 0, for usual page size. */
		bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >>
		    MAP_SPLIT_BOUNDARY_SHIFT;
		if (bidx >= MAXPAGESIZES)
			return (KERN_INVALID_ARGUMENT);
		bdry = pagesizes[bidx] - 1;
		if ((start & bdry) != 0 || (end & bdry) != 0)
			return (KERN_INVALID_ARGUMENT);
		protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	}

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		if ((object->flags & OBJ_ANON) != 0) {
			VM_OBJECT_WLOCK(object);
			if (object->ref_count > 1 || object->shadow_count != 0)
				vm_object_clear_flag(object, OBJ_ONEMAPPING);
			VM_OBJECT_WUNLOCK(object);
		}
	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
	    protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
	    MAP_VN_EXEC)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			vm_map_entry_resize(map, prev_entry,
			    end - prev_entry->end);
			*res = vm_map_try_merge_entries(map, prev_entry,
			    next_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
1823 */ 1824 vm_map_try_merge_entries(map, prev_entry, new_entry); 1825 *res = vm_map_try_merge_entries(map, new_entry, next_entry); 1826 1827 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1828 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1829 end - start, cow & MAP_PREFAULT_PARTIAL); 1830 } 1831 1832 return (KERN_SUCCESS); 1833 } 1834 1835 /* 1836 * vm_map_insert: 1837 * 1838 * Inserts the given VM object into the target map at the 1839 * specified address range. 1840 * 1841 * Requires that the map be locked, and leaves it so. 1842 * 1843 * If object is non-NULL, ref count must be bumped by caller 1844 * prior to making call to account for the new entry. 1845 */ 1846 int 1847 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1848 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1849 { 1850 vm_map_entry_t res; 1851 1852 return (vm_map_insert1(map, object, offset, start, end, prot, max, 1853 cow, &res)); 1854 } 1855 1856 /* 1857 * vm_map_findspace: 1858 * 1859 * Find the first fit (lowest VM address) for "length" free bytes 1860 * beginning at address >= start in the given map. 1861 * 1862 * In a vm_map_entry, "max_free" is the maximum amount of 1863 * contiguous free space between an entry in its subtree and a 1864 * neighbor of that entry. This allows finding a free region in 1865 * one path down the tree, so O(log n) amortized with splay 1866 * trees. 1867 * 1868 * The map must be locked, and leaves it so. 1869 * 1870 * Returns: starting address if sufficient space, 1871 * vm_map_max(map)-length+1 if insufficient space. 1872 */ 1873 vm_offset_t 1874 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length) 1875 { 1876 vm_map_entry_t header, llist, rlist, root, y; 1877 vm_size_t left_length, max_free_left, max_free_right; 1878 vm_offset_t gap_end; 1879 1880 VM_MAP_ASSERT_LOCKED(map); 1881 1882 /* 1883 * Request must fit within min/max VM address and must avoid 1884 * address wrap. 1885 */ 1886 start = MAX(start, vm_map_min(map)); 1887 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) 1888 return (vm_map_max(map) - length + 1); 1889 1890 /* Empty tree means wide open address space. */ 1891 if (map->root == NULL) 1892 return (start); 1893 1894 /* 1895 * After splay_split, if start is within an entry, push it to the start 1896 * of the following gap. If rlist is at the end of the gap containing 1897 * start, save the end of that gap in gap_end to see if the gap is big 1898 * enough; otherwise set gap_end to start skip gap-checking and move 1899 * directly to a search of the right subtree. 
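 *
 * As a hypothetical example: if start falls inside an entry that ends at
 * 0x30000 and the gap following that entry runs up to the next entry at
 * 0x38000, then start is pushed to 0x30000 and gap_end becomes 0x38000,
 * so any request of at most 0x8000 bytes is satisfied immediately without
 * descending into the right subtree.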
1900 */ 1901 header = &map->header; 1902 root = vm_map_splay_split(map, start, length, &llist, &rlist); 1903 gap_end = rlist->start; 1904 if (root != NULL) { 1905 start = root->end; 1906 if (root->right != rlist) 1907 gap_end = start; 1908 max_free_left = vm_map_splay_merge_left(header, root, llist); 1909 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1910 } else if (rlist != header) { 1911 root = rlist; 1912 rlist = root->left; 1913 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1914 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1915 } else { 1916 root = llist; 1917 llist = root->right; 1918 max_free_left = vm_map_splay_merge_left(header, root, llist); 1919 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1920 } 1921 root->max_free = vm_size_max(max_free_left, max_free_right); 1922 map->root = root; 1923 VM_MAP_ASSERT_CONSISTENT(map); 1924 if (length <= gap_end - start) 1925 return (start); 1926 1927 /* With max_free, can immediately tell if no solution. */ 1928 if (root->right == header || length > root->right->max_free) 1929 return (vm_map_max(map) - length + 1); 1930 1931 /* 1932 * Splay for the least large-enough gap in the right subtree. 1933 */ 1934 llist = rlist = header; 1935 for (left_length = 0;; 1936 left_length = vm_map_entry_max_free_left(root, llist)) { 1937 if (length <= left_length) 1938 SPLAY_LEFT_STEP(root, y, llist, rlist, 1939 length <= vm_map_entry_max_free_left(y, llist)); 1940 else 1941 SPLAY_RIGHT_STEP(root, y, llist, rlist, 1942 length > vm_map_entry_max_free_left(y, root)); 1943 if (root == NULL) 1944 break; 1945 } 1946 root = llist; 1947 llist = root->right; 1948 max_free_left = vm_map_splay_merge_left(header, root, llist); 1949 if (rlist == header) { 1950 root->max_free = vm_size_max(max_free_left, 1951 vm_map_splay_merge_succ(header, root, rlist)); 1952 } else { 1953 y = rlist; 1954 rlist = y->left; 1955 y->max_free = vm_size_max( 1956 vm_map_splay_merge_pred(root, y, root), 1957 vm_map_splay_merge_right(header, y, rlist)); 1958 root->max_free = vm_size_max(max_free_left, y->max_free); 1959 } 1960 map->root = root; 1961 VM_MAP_ASSERT_CONSISTENT(map); 1962 return (root->end); 1963 } 1964 1965 int 1966 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1967 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1968 vm_prot_t max, int cow) 1969 { 1970 vm_offset_t end; 1971 int result; 1972 1973 end = start + length; 1974 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1975 object == NULL, 1976 ("vm_map_fixed: non-NULL backing object for stack")); 1977 vm_map_lock(map); 1978 VM_MAP_RANGE_CHECK(map, start, end); 1979 if ((cow & MAP_CHECK_EXCL) == 0) { 1980 result = vm_map_delete(map, start, end); 1981 if (result != KERN_SUCCESS) 1982 goto out; 1983 } 1984 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1985 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1986 prot, max, cow); 1987 } else { 1988 result = vm_map_insert(map, object, offset, start, end, 1989 prot, max, cow); 1990 } 1991 out: 1992 vm_map_unlock(map); 1993 return (result); 1994 } 1995 1996 #if VM_NRESERVLEVEL <= 1 1997 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10}; 1998 static const int aslr_pages_rnd_32[2] = {0x100, 0x4}; 1999 #elif VM_NRESERVLEVEL == 2 2000 static const int aslr_pages_rnd_64[3] = {0x1000, 0x1000, 0x10}; 2001 static const int aslr_pages_rnd_32[3] = {0x100, 0x100, 0x4}; 2002 #else 2003 #error "Unsupported VM_NRESERVLEVEL" 2004 #endif 2005 2006 static int 
cluster_anon = 1; 2007 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW, 2008 &cluster_anon, 0, 2009 "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always"); 2010 2011 static bool 2012 clustering_anon_allowed(vm_offset_t addr, int cow) 2013 { 2014 2015 switch (cluster_anon) { 2016 case 0: 2017 return (false); 2018 case 1: 2019 return (addr == 0 || (cow & MAP_NO_HINT) != 0); 2020 case 2: 2021 default: 2022 return (true); 2023 } 2024 } 2025 2026 static long aslr_restarts; 2027 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD, 2028 &aslr_restarts, 0, 2029 "Number of aslr failures"); 2030 2031 /* 2032 * Searches for the specified amount of free space in the given map with the 2033 * specified alignment. Performs an address-ordered, first-fit search from 2034 * the given address "*addr", with an optional upper bound "max_addr". If the 2035 * parameter "alignment" is zero, then the alignment is computed from the 2036 * given (object, offset) pair so as to enable the greatest possible use of 2037 * superpage mappings. Returns KERN_SUCCESS and the address of the free space 2038 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE. 2039 * 2040 * The map must be locked. Initially, there must be at least "length" bytes 2041 * of free space at the given address. 2042 */ 2043 static int 2044 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2045 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr, 2046 vm_offset_t alignment) 2047 { 2048 vm_offset_t aligned_addr, free_addr; 2049 2050 VM_MAP_ASSERT_LOCKED(map); 2051 free_addr = *addr; 2052 KASSERT(free_addr == vm_map_findspace(map, free_addr, length), 2053 ("caller failed to provide space %#jx at address %p", 2054 (uintmax_t)length, (void *)free_addr)); 2055 for (;;) { 2056 /* 2057 * At the start of every iteration, the free space at address 2058 * "*addr" is at least "length" bytes. 2059 */ 2060 if (alignment == 0) 2061 pmap_align_superpage(object, offset, addr, length); 2062 else 2063 *addr = roundup2(*addr, alignment); 2064 aligned_addr = *addr; 2065 if (aligned_addr == free_addr) { 2066 /* 2067 * Alignment did not change "*addr", so "*addr" must 2068 * still provide sufficient free space. 2069 */ 2070 return (KERN_SUCCESS); 2071 } 2072 2073 /* 2074 * Test for address wrap on "*addr". A wrapped "*addr" could 2075 * be a valid address, in which case vm_map_findspace() cannot 2076 * be relied upon to fail. 2077 */ 2078 if (aligned_addr < free_addr) 2079 return (KERN_NO_SPACE); 2080 *addr = vm_map_findspace(map, aligned_addr, length); 2081 if (*addr + length > vm_map_max(map) || 2082 (max_addr != 0 && *addr + length > max_addr)) 2083 return (KERN_NO_SPACE); 2084 free_addr = *addr; 2085 if (free_addr == aligned_addr) { 2086 /* 2087 * If a successful call to vm_map_findspace() did not 2088 * change "*addr", then "*addr" must still be aligned 2089 * and provide sufficient free space. 2090 */ 2091 return (KERN_SUCCESS); 2092 } 2093 } 2094 } 2095 2096 int 2097 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length, 2098 vm_offset_t max_addr, vm_offset_t alignment) 2099 { 2100 /* XXXKIB ASLR eh ? */ 2101 *addr = vm_map_findspace(map, *addr, length); 2102 if (*addr + length > vm_map_max(map) || 2103 (max_addr != 0 && *addr + length > max_addr)) 2104 return (KERN_NO_SPACE); 2105 return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr, 2106 alignment)); 2107 } 2108 2109 /* 2110 * vm_map_find finds an unallocated region in the target address 2111 * map with the given length. 
The search is defined to be 2112 * first-fit from the specified address; the region found is 2113 * returned in the same parameter. 2114 * 2115 * If object is non-NULL, ref count must be bumped by caller 2116 * prior to making call to account for the new entry. 2117 */ 2118 int 2119 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2120 vm_offset_t *addr, /* IN/OUT */ 2121 vm_size_t length, vm_offset_t max_addr, int find_space, 2122 vm_prot_t prot, vm_prot_t max, int cow) 2123 { 2124 vm_offset_t alignment, curr_min_addr, min_addr; 2125 int gap, pidx, rv, try; 2126 bool cluster, en_aslr, update_anon; 2127 2128 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 2129 object == NULL, 2130 ("vm_map_find: non-NULL backing object for stack")); 2131 MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE && 2132 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)); 2133 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 2134 (object->flags & OBJ_COLORED) == 0)) 2135 find_space = VMFS_ANY_SPACE; 2136 if (find_space >> 8 != 0) { 2137 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 2138 alignment = (vm_offset_t)1 << (find_space >> 8); 2139 } else 2140 alignment = 0; 2141 en_aslr = (map->flags & MAP_ASLR) != 0; 2142 update_anon = cluster = clustering_anon_allowed(*addr, cow) && 2143 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && 2144 find_space != VMFS_NO_SPACE && object == NULL && 2145 (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP | 2146 MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE; 2147 curr_min_addr = min_addr = *addr; 2148 if (en_aslr && min_addr == 0 && !cluster && 2149 find_space != VMFS_NO_SPACE && 2150 (map->flags & MAP_ASLR_IGNSTART) != 0) 2151 curr_min_addr = min_addr = vm_map_min(map); 2152 try = 0; 2153 vm_map_lock(map); 2154 if (cluster) { 2155 curr_min_addr = map->anon_loc; 2156 if (curr_min_addr == 0) 2157 cluster = false; 2158 } 2159 if (find_space != VMFS_NO_SPACE) { 2160 KASSERT(find_space == VMFS_ANY_SPACE || 2161 find_space == VMFS_OPTIMAL_SPACE || 2162 find_space == VMFS_SUPER_SPACE || 2163 alignment != 0, ("unexpected VMFS flag")); 2164 again: 2165 /* 2166 * When creating an anonymous mapping, try clustering 2167 * with an existing anonymous mapping first. 2168 * 2169 * We make up to two attempts to find address space 2170 * for a given find_space value. The first attempt may 2171 * apply randomization or may cluster with an existing 2172 * anonymous mapping. If this first attempt fails, 2173 * perform a first-fit search of the available address 2174 * space. 2175 * 2176 * If all tries failed, and find_space is 2177 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE. 2178 * Again enable clustering and randomization. 2179 */ 2180 try++; 2181 MPASS(try <= 2); 2182 2183 if (try == 2) { 2184 /* 2185 * Second try: we failed either to find a 2186 * suitable region for randomizing the 2187 * allocation, or to cluster with an existing 2188 * mapping. Retry with free run. 2189 */ 2190 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? 2191 vm_map_min(map) : min_addr; 2192 atomic_add_long(&aslr_restarts, 1); 2193 } 2194 2195 if (try == 1 && en_aslr && !cluster) { 2196 /* 2197 * Find space for allocation, including 2198 * gap needed for later randomization. 
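 *
 * As a hypothetical example, assuming 4KB base pages and a 64-bit map
 * (so that aslr_pages_rnd_64[] applies) with pidx == 0: the gap is
 * 0x1000 pages, vm_map_findspace() is therefore asked for
 * length + 0x1000 * 4096 bytes, and the chosen start is then advanced
 * by (arc4random() % 0x1000) * 4096 bytes, i.e. by up to just under
 * 16MB in page-sized steps.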
2199 */ 2200 pidx = 0; 2201 #if VM_NRESERVLEVEL > 0 2202 if ((find_space == VMFS_SUPER_SPACE || 2203 find_space == VMFS_OPTIMAL_SPACE) && 2204 pagesizes[VM_NRESERVLEVEL] != 0) { 2205 /* 2206 * Do not pointlessly increase the space that 2207 * is requested from vm_map_findspace(). 2208 * pmap_align_superpage() will only change a 2209 * mapping's alignment if that mapping is at 2210 * least a superpage in size. 2211 */ 2212 pidx = VM_NRESERVLEVEL; 2213 while (pidx > 0 && length < pagesizes[pidx]) 2214 pidx--; 2215 } 2216 #endif 2217 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR && 2218 (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ? 2219 aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx]; 2220 *addr = vm_map_findspace(map, curr_min_addr, 2221 length + gap * pagesizes[pidx]); 2222 if (*addr + length + gap * pagesizes[pidx] > 2223 vm_map_max(map)) 2224 goto again; 2225 /* And randomize the start address. */ 2226 *addr += (arc4random() % gap) * pagesizes[pidx]; 2227 if (max_addr != 0 && *addr + length > max_addr) 2228 goto again; 2229 } else { 2230 *addr = vm_map_findspace(map, curr_min_addr, length); 2231 if (*addr + length > vm_map_max(map) || 2232 (max_addr != 0 && *addr + length > max_addr)) { 2233 if (cluster) { 2234 cluster = false; 2235 MPASS(try == 1); 2236 goto again; 2237 } 2238 rv = KERN_NO_SPACE; 2239 goto done; 2240 } 2241 } 2242 2243 if (find_space != VMFS_ANY_SPACE && 2244 (rv = vm_map_alignspace(map, object, offset, addr, length, 2245 max_addr, alignment)) != KERN_SUCCESS) { 2246 if (find_space == VMFS_OPTIMAL_SPACE) { 2247 find_space = VMFS_ANY_SPACE; 2248 curr_min_addr = min_addr; 2249 cluster = update_anon; 2250 try = 0; 2251 goto again; 2252 } 2253 goto done; 2254 } 2255 } else if ((cow & MAP_REMAP) != 0) { 2256 if (!vm_map_range_valid(map, *addr, *addr + length)) { 2257 rv = KERN_INVALID_ADDRESS; 2258 goto done; 2259 } 2260 rv = vm_map_delete(map, *addr, *addr + length); 2261 if (rv != KERN_SUCCESS) 2262 goto done; 2263 } 2264 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 2265 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 2266 max, cow); 2267 } else { 2268 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 2269 prot, max, cow); 2270 } 2271 2272 /* 2273 * Update the starting address for clustered anonymous memory mappings 2274 * if a starting address was not previously defined or an ASLR restart 2275 * placed an anonymous memory mapping at a lower address. 2276 */ 2277 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || 2278 *addr < map->anon_loc)) 2279 map->anon_loc = *addr; 2280 done: 2281 vm_map_unlock(map); 2282 return (rv); 2283 } 2284 2285 /* 2286 * vm_map_find_min() is a variant of vm_map_find() that takes an 2287 * additional parameter ("default_addr") and treats the given address 2288 * ("*addr") differently. Specifically, it treats "*addr" as a hint 2289 * and not as the minimum address where the mapping is created. 2290 * 2291 * This function works in two phases. First, it tries to 2292 * allocate above the hint. If that fails and the hint is 2293 * greater than "default_addr", it performs a second pass, replacing 2294 * the hint with "default_addr" as the minimum address for the 2295 * allocation. 
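 *
 * A minimal usage sketch (illustrative only, not taken from any real
 * caller; "map" and PAGE_SIZE stand in for caller-supplied values):
 *
 *	vm_offset_t addr;
 *	int rv;
 *
 *	addr = 0;
 *	rv = vm_map_find_min(map, NULL, 0, &addr, PAGE_SIZE,
 *	    vm_map_min(map), 0, VMFS_ANY_SPACE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));
 *
 * Because the hint is zero, MAP_NO_HINT is set and "default_addr"
 * (here vm_map_min(map)) is used as the minimum address for the search.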
2296 */ 2297 int 2298 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2299 vm_offset_t *addr, vm_size_t length, vm_offset_t default_addr, 2300 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 2301 int cow) 2302 { 2303 vm_offset_t hint; 2304 int rv; 2305 2306 hint = *addr; 2307 if (hint == 0) { 2308 cow |= MAP_NO_HINT; 2309 *addr = hint = default_addr; 2310 } 2311 for (;;) { 2312 rv = vm_map_find(map, object, offset, addr, length, max_addr, 2313 find_space, prot, max, cow); 2314 if (rv == KERN_SUCCESS || default_addr >= hint) 2315 return (rv); 2316 *addr = hint = default_addr; 2317 } 2318 } 2319 2320 /* 2321 * A map entry with any of the following flags set must not be merged with 2322 * another entry. 2323 */ 2324 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \ 2325 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \ 2326 MAP_ENTRY_STACK_GAP_UP | MAP_ENTRY_STACK_GAP_DN) 2327 2328 static bool 2329 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 2330 { 2331 2332 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 2333 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 2334 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 2335 prev, entry)); 2336 return (prev->end == entry->start && 2337 prev->object.vm_object == entry->object.vm_object && 2338 (prev->object.vm_object == NULL || 2339 prev->offset + (prev->end - prev->start) == entry->offset) && 2340 prev->eflags == entry->eflags && 2341 prev->protection == entry->protection && 2342 prev->max_protection == entry->max_protection && 2343 prev->inheritance == entry->inheritance && 2344 prev->wired_count == entry->wired_count && 2345 prev->cred == entry->cred); 2346 } 2347 2348 static void 2349 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 2350 { 2351 2352 /* 2353 * If the backing object is a vnode object, vm_object_deallocate() 2354 * calls vrele(). However, vrele() does not lock the vnode because 2355 * the vnode has additional references. Thus, the map lock can be 2356 * kept without causing a lock-order reversal with the vnode lock. 2357 * 2358 * Since we count the number of virtual page mappings in 2359 * object->un_pager.vnp.writemappings, the writemappings value 2360 * should not be adjusted when the entry is disposed of. 2361 */ 2362 if (entry->object.vm_object != NULL) 2363 vm_object_deallocate(entry->object.vm_object); 2364 if (entry->cred != NULL) 2365 crfree(entry->cred); 2366 vm_map_entry_dispose(map, entry); 2367 } 2368 2369 /* 2370 * vm_map_try_merge_entries: 2371 * 2372 * Compare two map entries that represent consecutive ranges. If 2373 * the entries can be merged, expand the range of the second to 2374 * cover the range of the first and delete the first. Then return 2375 * the map entry that includes the first range. 2376 * 2377 * The map must be locked. 2378 */ 2379 vm_map_entry_t 2380 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry, 2381 vm_map_entry_t entry) 2382 { 2383 2384 VM_MAP_ASSERT_LOCKED(map); 2385 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && 2386 vm_map_mergeable_neighbors(prev_entry, entry)) { 2387 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT); 2388 vm_map_merged_neighbor_dispose(map, prev_entry); 2389 return (entry); 2390 } 2391 return (prev_entry); 2392 } 2393 2394 /* 2395 * vm_map_entry_back: 2396 * 2397 * Allocate an object to back a map entry. 
2398 */ 2399 static inline void 2400 vm_map_entry_back(vm_map_entry_t entry) 2401 { 2402 vm_object_t object; 2403 2404 KASSERT(entry->object.vm_object == NULL, 2405 ("map entry %p has backing object", entry)); 2406 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2407 ("map entry %p is a submap", entry)); 2408 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, 2409 entry->cred, entry->end - entry->start); 2410 entry->object.vm_object = object; 2411 entry->offset = 0; 2412 entry->cred = NULL; 2413 } 2414 2415 /* 2416 * vm_map_entry_charge_object 2417 * 2418 * If there is no object backing this entry, create one. Otherwise, if 2419 * the entry has cred, give it to the backing object. 2420 */ 2421 static inline void 2422 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) 2423 { 2424 2425 VM_MAP_ASSERT_LOCKED(map); 2426 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2427 ("map entry %p is a submap", entry)); 2428 if (entry->object.vm_object == NULL && !map->system_map && 2429 (entry->eflags & MAP_ENTRY_GUARD) == 0) 2430 vm_map_entry_back(entry); 2431 else if (entry->object.vm_object != NULL && 2432 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2433 entry->cred != NULL) { 2434 VM_OBJECT_WLOCK(entry->object.vm_object); 2435 KASSERT(entry->object.vm_object->cred == NULL, 2436 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); 2437 entry->object.vm_object->cred = entry->cred; 2438 entry->object.vm_object->charge = entry->end - entry->start; 2439 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2440 entry->cred = NULL; 2441 } 2442 } 2443 2444 /* 2445 * vm_map_entry_clone 2446 * 2447 * Create a duplicate map entry for clipping. 2448 */ 2449 static vm_map_entry_t 2450 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry) 2451 { 2452 vm_map_entry_t new_entry; 2453 2454 VM_MAP_ASSERT_LOCKED(map); 2455 2456 /* 2457 * Create a backing object now, if none exists, so that more individual 2458 * objects won't be created after the map entry is split. 2459 */ 2460 vm_map_entry_charge_object(map, entry); 2461 2462 /* Clone the entry. */ 2463 new_entry = vm_map_entry_create(map); 2464 *new_entry = *entry; 2465 if (new_entry->cred != NULL) 2466 crhold(entry->cred); 2467 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2468 vm_object_reference(new_entry->object.vm_object); 2469 vm_map_entry_set_vnode_text(new_entry, true); 2470 /* 2471 * The object->un_pager.vnp.writemappings for the object of 2472 * MAP_ENTRY_WRITECNT type entry shall be kept as is here. The 2473 * virtual pages are re-distributed among the clipped entries, 2474 * so the sum is left the same. 2475 */ 2476 } 2477 return (new_entry); 2478 } 2479 2480 /* 2481 * vm_map_clip_start: [ internal use only ] 2482 * 2483 * Asserts that the given entry begins at or after 2484 * the specified address; if necessary, 2485 * it splits the entry into two. 
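 *
 * As a hypothetical example: clipping an entry that covers
 * [0x200000, 0x280000) at startaddr 0x240000 leaves two entries,
 * [0x200000, 0x240000) (the newly linked clone) followed by
 * [0x240000, 0x280000) (this entry, now beginning at the requested
 * address); both share the same backing object, with offsets adjusted
 * so that each half continues to map the same pages as before.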
2486 */ 2487 static int 2488 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr) 2489 { 2490 vm_map_entry_t new_entry; 2491 int bdry_idx; 2492 2493 if (!map->system_map) 2494 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2495 "%s: map %p entry %p start 0x%jx", __func__, map, entry, 2496 (uintmax_t)startaddr); 2497 2498 if (startaddr <= entry->start) 2499 return (KERN_SUCCESS); 2500 2501 VM_MAP_ASSERT_LOCKED(map); 2502 KASSERT(entry->end > startaddr && entry->start < startaddr, 2503 ("%s: invalid clip of entry %p", __func__, entry)); 2504 2505 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2506 if (bdry_idx != 0) { 2507 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) 2508 return (KERN_INVALID_ARGUMENT); 2509 } 2510 2511 new_entry = vm_map_entry_clone(map, entry); 2512 2513 /* 2514 * Split off the front portion. Insert the new entry BEFORE this one, 2515 * so that this entry has the specified starting address. 2516 */ 2517 new_entry->end = startaddr; 2518 vm_map_entry_link(map, new_entry); 2519 return (KERN_SUCCESS); 2520 } 2521 2522 /* 2523 * vm_map_lookup_clip_start: 2524 * 2525 * Find the entry at or just after 'start', and clip it if 'start' is in 2526 * the interior of the entry. Return entry after 'start', and in 2527 * prev_entry set the entry before 'start'. 2528 */ 2529 static int 2530 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start, 2531 vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry) 2532 { 2533 vm_map_entry_t entry; 2534 int rv; 2535 2536 if (!map->system_map) 2537 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2538 "%s: map %p start 0x%jx prev %p", __func__, map, 2539 (uintmax_t)start, prev_entry); 2540 2541 if (vm_map_lookup_entry(map, start, prev_entry)) { 2542 entry = *prev_entry; 2543 rv = vm_map_clip_start(map, entry, start); 2544 if (rv != KERN_SUCCESS) 2545 return (rv); 2546 *prev_entry = vm_map_entry_pred(entry); 2547 } else 2548 entry = vm_map_entry_succ(*prev_entry); 2549 *res_entry = entry; 2550 return (KERN_SUCCESS); 2551 } 2552 2553 /* 2554 * vm_map_clip_end: [ internal use only ] 2555 * 2556 * Asserts that the given entry ends at or before 2557 * the specified address; if necessary, 2558 * it splits the entry into two. 2559 */ 2560 static int 2561 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr) 2562 { 2563 vm_map_entry_t new_entry; 2564 int bdry_idx; 2565 2566 if (!map->system_map) 2567 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2568 "%s: map %p entry %p end 0x%jx", __func__, map, entry, 2569 (uintmax_t)endaddr); 2570 2571 if (endaddr >= entry->end) 2572 return (KERN_SUCCESS); 2573 2574 VM_MAP_ASSERT_LOCKED(map); 2575 KASSERT(entry->start < endaddr && entry->end > endaddr, 2576 ("%s: invalid clip of entry %p", __func__, entry)); 2577 2578 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2579 if (bdry_idx != 0) { 2580 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) 2581 return (KERN_INVALID_ARGUMENT); 2582 } 2583 2584 new_entry = vm_map_entry_clone(map, entry); 2585 2586 /* 2587 * Split off the back portion. Insert the new entry AFTER this one, 2588 * so that this entry has the specified ending address. 2589 */ 2590 new_entry->start = endaddr; 2591 vm_map_entry_link(map, new_entry); 2592 2593 return (KERN_SUCCESS); 2594 } 2595 2596 /* 2597 * vm_map_submap: [ kernel use only ] 2598 * 2599 * Mark the given range as handled by a subordinate map. 
2600 * 2601 * This range must have been created with vm_map_find, 2602 * and no other operations may have been performed on this 2603 * range prior to calling vm_map_submap. 2604 * 2605 * Only a limited number of operations can be performed 2606 * within this range after calling vm_map_submap: 2607 * vm_fault 2608 * [Don't try vm_map_copy!] 2609 * 2610 * To remove a submapping, one must first remove the 2611 * range from the superior map, and then destroy the 2612 * submap (if desired). [Better yet, don't try it.] 2613 */ 2614 int 2615 vm_map_submap( 2616 vm_map_t map, 2617 vm_offset_t start, 2618 vm_offset_t end, 2619 vm_map_t submap) 2620 { 2621 vm_map_entry_t entry; 2622 int result; 2623 2624 result = KERN_INVALID_ARGUMENT; 2625 2626 vm_map_lock(submap); 2627 submap->flags |= MAP_IS_SUB_MAP; 2628 vm_map_unlock(submap); 2629 2630 vm_map_lock(map); 2631 VM_MAP_RANGE_CHECK(map, start, end); 2632 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && 2633 (entry->eflags & MAP_ENTRY_COW) == 0 && 2634 entry->object.vm_object == NULL) { 2635 result = vm_map_clip_start(map, entry, start); 2636 if (result != KERN_SUCCESS) 2637 goto unlock; 2638 result = vm_map_clip_end(map, entry, end); 2639 if (result != KERN_SUCCESS) 2640 goto unlock; 2641 entry->object.sub_map = submap; 2642 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 2643 result = KERN_SUCCESS; 2644 } 2645 unlock: 2646 vm_map_unlock(map); 2647 2648 if (result != KERN_SUCCESS) { 2649 vm_map_lock(submap); 2650 submap->flags &= ~MAP_IS_SUB_MAP; 2651 vm_map_unlock(submap); 2652 } 2653 return (result); 2654 } 2655 2656 /* 2657 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 2658 */ 2659 #define MAX_INIT_PT 96 2660 2661 /* 2662 * vm_map_pmap_enter: 2663 * 2664 * Preload the specified map's pmap with mappings to the specified 2665 * object's memory-resident pages. No further physical pages are 2666 * allocated, and no further virtual pages are retrieved from secondary 2667 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 2668 * limited number of page mappings are created at the low-end of the 2669 * specified address range. (For this purpose, a superpage mapping 2670 * counts as one page mapping.) Otherwise, all resident pages within 2671 * the specified address range are mapped. 2672 */ 2673 static void 2674 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2675 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2676 { 2677 vm_offset_t start; 2678 vm_page_t p, p_start; 2679 vm_pindex_t mask, psize, threshold, tmpidx; 2680 int psind; 2681 2682 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2683 return; 2684 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2685 VM_OBJECT_WLOCK(object); 2686 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2687 pmap_object_init_pt(map->pmap, addr, object, pindex, 2688 size); 2689 VM_OBJECT_WUNLOCK(object); 2690 return; 2691 } 2692 VM_OBJECT_LOCK_DOWNGRADE(object); 2693 } else 2694 VM_OBJECT_RLOCK(object); 2695 2696 psize = atop(size); 2697 if (psize + pindex > object->size) { 2698 if (pindex >= object->size) { 2699 VM_OBJECT_RUNLOCK(object); 2700 return; 2701 } 2702 psize = object->size - pindex; 2703 } 2704 2705 start = 0; 2706 p_start = NULL; 2707 threshold = MAX_INIT_PT; 2708 2709 p = vm_page_find_least(object, pindex); 2710 /* 2711 * Assert: the variable p is either (1) the page with the 2712 * least pindex greater than or equal to the parameter pindex 2713 * or (2) NULL.
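 *
 * For example (hypothetical pindexes): if the object has resident pages
 * at pindexes 3, 7 and 9 and the requested pindex is 5, then
 * vm_page_find_least() returns the page at pindex 7.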
2714 */ 2715 for (; 2716 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2717 p = TAILQ_NEXT(p, listq)) { 2718 /* 2719 * don't allow an madvise to blow away our really 2720 * free pages allocating pv entries. 2721 */ 2722 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2723 vm_page_count_severe()) || 2724 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2725 tmpidx >= threshold)) { 2726 psize = tmpidx; 2727 break; 2728 } 2729 if (vm_page_all_valid(p)) { 2730 if (p_start == NULL) { 2731 start = addr + ptoa(tmpidx); 2732 p_start = p; 2733 } 2734 /* Jump ahead if a superpage mapping is possible. */ 2735 for (psind = p->psind; psind > 0; psind--) { 2736 if (((addr + ptoa(tmpidx)) & 2737 (pagesizes[psind] - 1)) == 0) { 2738 mask = atop(pagesizes[psind]) - 1; 2739 if (tmpidx + mask < psize && 2740 vm_page_ps_test(p, psind, 2741 PS_ALL_VALID, NULL)) { 2742 p += mask; 2743 threshold += mask; 2744 break; 2745 } 2746 } 2747 } 2748 } else if (p_start != NULL) { 2749 pmap_enter_object(map->pmap, start, addr + 2750 ptoa(tmpidx), p_start, prot); 2751 p_start = NULL; 2752 } 2753 } 2754 if (p_start != NULL) 2755 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2756 p_start, prot); 2757 VM_OBJECT_RUNLOCK(object); 2758 } 2759 2760 static void 2761 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot, 2762 vm_prot_t new_maxprot, int flags) 2763 { 2764 vm_prot_t old_prot; 2765 2766 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); 2767 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP | 2768 MAP_ENTRY_STACK_GAP_DN)) == 0) 2769 return; 2770 2771 old_prot = PROT_EXTRACT(entry->offset); 2772 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2773 entry->offset = PROT_MAX(new_maxprot) | 2774 (new_maxprot & old_prot); 2775 } 2776 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) { 2777 entry->offset = new_prot | PROT_MAX( 2778 PROT_MAX_EXTRACT(entry->offset)); 2779 } 2780 } 2781 2782 /* 2783 * vm_map_protect: 2784 * 2785 * Sets the protection and/or the maximum protection of the 2786 * specified address region in the target map. 2787 */ 2788 int 2789 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2790 vm_prot_t new_prot, vm_prot_t new_maxprot, int flags) 2791 { 2792 vm_map_entry_t entry, first_entry, in_tran, prev_entry; 2793 vm_object_t obj; 2794 struct ucred *cred; 2795 vm_offset_t orig_start; 2796 vm_prot_t check_prot, max_prot, old_prot; 2797 int rv; 2798 2799 if (start == end) 2800 return (KERN_SUCCESS); 2801 2802 if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT | 2803 VM_MAP_PROTECT_SET_MAXPROT) && 2804 !CONTAINS_BITS(new_maxprot, new_prot)) 2805 return (KERN_OUT_OF_BOUNDS); 2806 2807 orig_start = start; 2808 again: 2809 in_tran = NULL; 2810 start = orig_start; 2811 vm_map_lock(map); 2812 2813 if ((map->flags & MAP_WXORX) != 0 && 2814 (flags & VM_MAP_PROTECT_SET_PROT) != 0 && 2815 CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) { 2816 vm_map_unlock(map); 2817 return (KERN_PROTECTION_FAILURE); 2818 } 2819 2820 /* 2821 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2822 * need to fault pages into the map and will drop the map lock while 2823 * doing so, and the VM object may end up in an inconsistent state if we 2824 * update the protection on the map entry in between faults. 
2825 */ 2826 vm_map_wait_busy(map); 2827 2828 VM_MAP_RANGE_CHECK(map, start, end); 2829 2830 if (!vm_map_lookup_entry(map, start, &first_entry)) 2831 first_entry = vm_map_entry_succ(first_entry); 2832 2833 if ((flags & VM_MAP_PROTECT_GROWSDOWN) != 0 && 2834 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { 2835 /* 2836 * Handle Linux's PROT_GROWSDOWN flag. 2837 * It means that protection is applied down to the 2838 * whole stack, including the specified range of the 2839 * mapped region, and the grow down region (AKA 2840 * guard). 2841 */ 2842 while (!CONTAINS_BITS(first_entry->eflags, 2843 MAP_ENTRY_GUARD | MAP_ENTRY_STACK_GAP_DN) && 2844 first_entry != vm_map_entry_first(map)) 2845 first_entry = vm_map_entry_pred(first_entry); 2846 start = first_entry->start; 2847 } 2848 2849 /* 2850 * Make a first pass to check for protection violations. 2851 */ 2852 check_prot = 0; 2853 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2854 check_prot |= new_prot; 2855 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) 2856 check_prot |= new_maxprot; 2857 for (entry = first_entry; entry->start < end; 2858 entry = vm_map_entry_succ(entry)) { 2859 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 2860 vm_map_unlock(map); 2861 return (KERN_INVALID_ARGUMENT); 2862 } 2863 if ((entry->eflags & (MAP_ENTRY_GUARD | 2864 MAP_ENTRY_STACK_GAP_DN | MAP_ENTRY_STACK_GAP_UP)) == 2865 MAP_ENTRY_GUARD) 2866 continue; 2867 max_prot = (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 2868 MAP_ENTRY_STACK_GAP_UP)) != 0 ? 2869 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; 2870 if (!CONTAINS_BITS(max_prot, check_prot)) { 2871 vm_map_unlock(map); 2872 return (KERN_PROTECTION_FAILURE); 2873 } 2874 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2875 in_tran = entry; 2876 } 2877 2878 /* 2879 * Postpone the operation until all in-transition map entries have 2880 * stabilized. An in-transition entry might already have its pages 2881 * wired and wired_count incremented, but not yet have its 2882 * MAP_ENTRY_USER_WIRED flag set. In which case, we would fail to call 2883 * vm_fault_copy_entry() in the final loop below. 2884 */ 2885 if (in_tran != NULL) { 2886 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2887 vm_map_unlock_and_wait(map, 0); 2888 goto again; 2889 } 2890 2891 /* 2892 * Before changing the protections, try to reserve swap space for any 2893 * private (i.e., copy-on-write) mappings that are transitioning from 2894 * read-only to read/write access. If a reservation fails, break out 2895 * of this loop early and let the next loop simplify the entries, since 2896 * some may now be mergeable. 
2897 */ 2898 rv = vm_map_clip_start(map, first_entry, start); 2899 if (rv != KERN_SUCCESS) { 2900 vm_map_unlock(map); 2901 return (rv); 2902 } 2903 for (entry = first_entry; entry->start < end; 2904 entry = vm_map_entry_succ(entry)) { 2905 rv = vm_map_clip_end(map, entry, end); 2906 if (rv != KERN_SUCCESS) { 2907 vm_map_unlock(map); 2908 return (rv); 2909 } 2910 2911 if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 || 2912 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || 2913 ENTRY_CHARGED(entry) || 2914 (entry->eflags & MAP_ENTRY_GUARD) != 0) 2915 continue; 2916 2917 cred = curthread->td_ucred; 2918 obj = entry->object.vm_object; 2919 2920 if (obj == NULL || 2921 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { 2922 if (!swap_reserve(entry->end - entry->start)) { 2923 rv = KERN_RESOURCE_SHORTAGE; 2924 end = entry->end; 2925 break; 2926 } 2927 crhold(cred); 2928 entry->cred = cred; 2929 continue; 2930 } 2931 2932 VM_OBJECT_WLOCK(obj); 2933 if ((obj->flags & OBJ_SWAP) == 0) { 2934 VM_OBJECT_WUNLOCK(obj); 2935 continue; 2936 } 2937 2938 /* 2939 * Charge for the whole object allocation now, since 2940 * we cannot distinguish between non-charged and 2941 * charged clipped mapping of the same object later. 2942 */ 2943 KASSERT(obj->charge == 0, 2944 ("vm_map_protect: object %p overcharged (entry %p)", 2945 obj, entry)); 2946 if (!swap_reserve(ptoa(obj->size))) { 2947 VM_OBJECT_WUNLOCK(obj); 2948 rv = KERN_RESOURCE_SHORTAGE; 2949 end = entry->end; 2950 break; 2951 } 2952 2953 crhold(cred); 2954 obj->cred = cred; 2955 obj->charge = ptoa(obj->size); 2956 VM_OBJECT_WUNLOCK(obj); 2957 } 2958 2959 /* 2960 * If enough swap space was available, go back and fix up protections. 2961 * Otherwise, just simplify entries, since some may have been modified. 2962 * [Note that clipping is not necessary the second time.] 2963 */ 2964 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry; 2965 entry->start < end; 2966 vm_map_try_merge_entries(map, prev_entry, entry), 2967 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 2968 if (rv != KERN_SUCCESS) 2969 continue; 2970 2971 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 2972 vm_map_protect_guard(entry, new_prot, new_maxprot, 2973 flags); 2974 continue; 2975 } 2976 2977 old_prot = entry->protection; 2978 2979 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2980 entry->max_protection = new_maxprot; 2981 entry->protection = new_maxprot & old_prot; 2982 } 2983 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2984 entry->protection = new_prot; 2985 2986 /* 2987 * For user wired map entries, the normal lazy evaluation of 2988 * write access upgrades through soft page faults is 2989 * undesirable. Instead, immediately copy any pages that are 2990 * copy-on-write and enable write access in the physical map. 2991 */ 2992 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2993 (entry->protection & VM_PROT_WRITE) != 0 && 2994 (old_prot & VM_PROT_WRITE) == 0) 2995 vm_fault_copy_entry(map, map, entry, entry, NULL); 2996 2997 /* 2998 * When restricting access, update the physical map. Worry 2999 * about copy-on-write here. 3000 */ 3001 if ((old_prot & ~entry->protection) != 0) { 3002 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 3003 VM_PROT_ALL) 3004 pmap_protect(map->pmap, entry->start, 3005 entry->end, 3006 entry->protection & MASK(entry)); 3007 #undef MASK 3008 } 3009 } 3010 vm_map_try_merge_entries(map, prev_entry, entry); 3011 vm_map_unlock(map); 3012 return (rv); 3013 } 3014 3015 /* 3016 * vm_map_madvise: 3017 * 3018 * This routine traverses a process's map handling the madvise 3019 * system call. Advisories are classified as either those affecting 3020 * the vm_map_entry structure, or those affecting the underlying 3021 * objects. 3022 */ 3023 int 3024 vm_map_madvise( 3025 vm_map_t map, 3026 vm_offset_t start, 3027 vm_offset_t end, 3028 int behav) 3029 { 3030 vm_map_entry_t entry, prev_entry; 3031 int rv; 3032 bool modify_map; 3033 3034 /* 3035 * Some madvise calls directly modify the vm_map_entry, in which case 3036 * we need to use an exclusive lock on the map and we need to perform 3037 * various clipping operations. Otherwise we only need a read-lock 3038 * on the map. 3039 */ 3040 switch(behav) { 3041 case MADV_NORMAL: 3042 case MADV_SEQUENTIAL: 3043 case MADV_RANDOM: 3044 case MADV_NOSYNC: 3045 case MADV_AUTOSYNC: 3046 case MADV_NOCORE: 3047 case MADV_CORE: 3048 if (start == end) 3049 return (0); 3050 modify_map = true; 3051 vm_map_lock(map); 3052 break; 3053 case MADV_WILLNEED: 3054 case MADV_DONTNEED: 3055 case MADV_FREE: 3056 if (start == end) 3057 return (0); 3058 modify_map = false; 3059 vm_map_lock_read(map); 3060 break; 3061 default: 3062 return (EINVAL); 3063 } 3064 3065 /* 3066 * Locate starting entry and clip if necessary. 3067 */ 3068 VM_MAP_RANGE_CHECK(map, start, end); 3069 3070 if (modify_map) { 3071 /* 3072 * madvise behaviors that are implemented in the vm_map_entry. 3073 * 3074 * We clip the vm_map_entry so that behavioral changes are 3075 * limited to the specified address range. 3076 */ 3077 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry); 3078 if (rv != KERN_SUCCESS) { 3079 vm_map_unlock(map); 3080 return (vm_mmap_to_errno(rv)); 3081 } 3082 3083 for (; entry->start < end; prev_entry = entry, 3084 entry = vm_map_entry_succ(entry)) { 3085 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 3086 continue; 3087 3088 rv = vm_map_clip_end(map, entry, end); 3089 if (rv != KERN_SUCCESS) { 3090 vm_map_unlock(map); 3091 return (vm_mmap_to_errno(rv)); 3092 } 3093 3094 switch (behav) { 3095 case MADV_NORMAL: 3096 vm_map_entry_set_behavior(entry, 3097 MAP_ENTRY_BEHAV_NORMAL); 3098 break; 3099 case MADV_SEQUENTIAL: 3100 vm_map_entry_set_behavior(entry, 3101 MAP_ENTRY_BEHAV_SEQUENTIAL); 3102 break; 3103 case MADV_RANDOM: 3104 vm_map_entry_set_behavior(entry, 3105 MAP_ENTRY_BEHAV_RANDOM); 3106 break; 3107 case MADV_NOSYNC: 3108 entry->eflags |= MAP_ENTRY_NOSYNC; 3109 break; 3110 case MADV_AUTOSYNC: 3111 entry->eflags &= ~MAP_ENTRY_NOSYNC; 3112 break; 3113 case MADV_NOCORE: 3114 entry->eflags |= MAP_ENTRY_NOCOREDUMP; 3115 break; 3116 case MADV_CORE: 3117 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; 3118 break; 3119 default: 3120 break; 3121 } 3122 vm_map_try_merge_entries(map, prev_entry, entry); 3123 } 3124 vm_map_try_merge_entries(map, prev_entry, entry); 3125 vm_map_unlock(map); 3126 } else { 3127 vm_pindex_t pstart, pend; 3128 3129 /* 3130 * madvise behaviors that are implemented in the underlying 3131 * vm_object. 3132 * 3133 * Since we don't clip the vm_map_entry, we have to clip 3134 * the vm_object pindex and count.
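 *
 * As a hypothetical example, assuming 4KB pages: for an entry mapping
 * [0x20000, 0x28000) with offset 0x3000 and an advised range of
 * [0x22000, 0x26000), pstart = OFF_TO_IDX(0x3000) + atop(0x2000) = 5,
 * pend = 3 + atop(0x8000) - atop(0x2000) = 9, useStart = 0x22000 and
 * useEnd = 0x26000, i.e. exactly the four object pages that back the
 * advised range.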
3135 */ 3136 if (!vm_map_lookup_entry(map, start, &entry)) 3137 entry = vm_map_entry_succ(entry); 3138 for (; entry->start < end; 3139 entry = vm_map_entry_succ(entry)) { 3140 vm_offset_t useEnd, useStart; 3141 3142 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | 3143 MAP_ENTRY_GUARD)) != 0) 3144 continue; 3145 3146 /* 3147 * MADV_FREE would otherwise rewind time to 3148 * the creation of the shadow object. Because 3149 * we hold the VM map read-locked, neither the 3150 * entry's object nor the presence of a 3151 * backing object can change. 3152 */ 3153 if (behav == MADV_FREE && 3154 entry->object.vm_object != NULL && 3155 entry->object.vm_object->backing_object != NULL) 3156 continue; 3157 3158 pstart = OFF_TO_IDX(entry->offset); 3159 pend = pstart + atop(entry->end - entry->start); 3160 useStart = entry->start; 3161 useEnd = entry->end; 3162 3163 if (entry->start < start) { 3164 pstart += atop(start - entry->start); 3165 useStart = start; 3166 } 3167 if (entry->end > end) { 3168 pend -= atop(entry->end - end); 3169 useEnd = end; 3170 } 3171 3172 if (pstart >= pend) 3173 continue; 3174 3175 /* 3176 * Perform the pmap_advise() before clearing 3177 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 3178 * concurrent pmap operation, such as pmap_remove(), 3179 * could clear a reference in the pmap and set 3180 * PGA_REFERENCED on the page before the pmap_advise() 3181 * had completed. Consequently, the page would appear 3182 * referenced based upon an old reference that 3183 * occurred before this pmap_advise() ran. 3184 */ 3185 if (behav == MADV_DONTNEED || behav == MADV_FREE) 3186 pmap_advise(map->pmap, useStart, useEnd, 3187 behav); 3188 3189 vm_object_madvise(entry->object.vm_object, pstart, 3190 pend, behav); 3191 3192 /* 3193 * Pre-populate paging structures in the 3194 * WILLNEED case. For wired entries, the 3195 * paging structures are already populated. 3196 */ 3197 if (behav == MADV_WILLNEED && 3198 entry->wired_count == 0) { 3199 vm_map_pmap_enter(map, 3200 useStart, 3201 entry->protection, 3202 entry->object.vm_object, 3203 pstart, 3204 ptoa(pend - pstart), 3205 MAP_PREFAULT_MADVISE 3206 ); 3207 } 3208 } 3209 vm_map_unlock_read(map); 3210 } 3211 return (0); 3212 } 3213 3214 /* 3215 * vm_map_inherit: 3216 * 3217 * Sets the inheritance of the specified address 3218 * range in the target map. Inheritance 3219 * affects how the map will be shared with 3220 * child maps at the time of vmspace_fork. 
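 *
 * A minimal usage sketch (illustrative only; "map", "start" and
 * PAGE_SIZE stand in for caller-supplied values):
 *
 *	int rv;
 *
 *	rv = vm_map_inherit(map, start, start + PAGE_SIZE,
 *	    VM_INHERIT_NONE);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));
 *
 * After this, a child created by vmspace_fork() does not receive a
 * mapping for that page, which is roughly what an minherit(2) request
 * with INHERIT_NONE boils down to.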
3221 */ 3222 int 3223 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 3224 vm_inherit_t new_inheritance) 3225 { 3226 vm_map_entry_t entry, lentry, prev_entry, start_entry; 3227 int rv; 3228 3229 switch (new_inheritance) { 3230 case VM_INHERIT_NONE: 3231 case VM_INHERIT_COPY: 3232 case VM_INHERIT_SHARE: 3233 case VM_INHERIT_ZERO: 3234 break; 3235 default: 3236 return (KERN_INVALID_ARGUMENT); 3237 } 3238 if (start == end) 3239 return (KERN_SUCCESS); 3240 vm_map_lock(map); 3241 VM_MAP_RANGE_CHECK(map, start, end); 3242 rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry); 3243 if (rv != KERN_SUCCESS) 3244 goto unlock; 3245 if (vm_map_lookup_entry(map, end - 1, &lentry)) { 3246 rv = vm_map_clip_end(map, lentry, end); 3247 if (rv != KERN_SUCCESS) 3248 goto unlock; 3249 } 3250 if (new_inheritance == VM_INHERIT_COPY) { 3251 for (entry = start_entry; entry->start < end; 3252 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3253 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) 3254 != 0) { 3255 rv = KERN_INVALID_ARGUMENT; 3256 goto unlock; 3257 } 3258 } 3259 } 3260 for (entry = start_entry; entry->start < end; prev_entry = entry, 3261 entry = vm_map_entry_succ(entry)) { 3262 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", 3263 entry, (uintmax_t)entry->end, (uintmax_t)end)); 3264 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 3265 new_inheritance != VM_INHERIT_ZERO) 3266 entry->inheritance = new_inheritance; 3267 vm_map_try_merge_entries(map, prev_entry, entry); 3268 } 3269 vm_map_try_merge_entries(map, prev_entry, entry); 3270 unlock: 3271 vm_map_unlock(map); 3272 return (rv); 3273 } 3274 3275 /* 3276 * vm_map_entry_in_transition: 3277 * 3278 * Release the map lock, and sleep until the entry is no longer in 3279 * transition. Awake and acquire the map lock. If the map changed while 3280 * another held the lock, lookup a possibly-changed entry at or after the 3281 * 'start' position of the old entry. 3282 */ 3283 static vm_map_entry_t 3284 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start, 3285 vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry) 3286 { 3287 vm_map_entry_t entry; 3288 vm_offset_t start; 3289 u_int last_timestamp; 3290 3291 VM_MAP_ASSERT_LOCKED(map); 3292 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3293 ("not in-tranition map entry %p", in_entry)); 3294 /* 3295 * We have not yet clipped the entry. 3296 */ 3297 start = MAX(in_start, in_entry->start); 3298 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3299 last_timestamp = map->timestamp; 3300 if (vm_map_unlock_and_wait(map, 0)) { 3301 /* 3302 * Allow interruption of user wiring/unwiring? 3303 */ 3304 } 3305 vm_map_lock(map); 3306 if (last_timestamp + 1 == map->timestamp) 3307 return (in_entry); 3308 3309 /* 3310 * Look again for the entry because the map was modified while it was 3311 * unlocked. Specifically, the entry may have been clipped, merged, or 3312 * deleted. 3313 */ 3314 if (!vm_map_lookup_entry(map, start, &entry)) { 3315 if (!holes_ok) { 3316 *io_end = start; 3317 return (NULL); 3318 } 3319 entry = vm_map_entry_succ(entry); 3320 } 3321 return (entry); 3322 } 3323 3324 /* 3325 * vm_map_unwire: 3326 * 3327 * Implements both kernel and user unwiring. 
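 *
 * A minimal sketch of a user unwire request (illustrative only; "addr"
 * and "len" are caller-supplied values, and trunc_page()/round_page()
 * are the usual page-rounding macros):
 *
 *	int rv;
 *
 *	rv = vm_map_unwire(map, trunc_page(addr),
 *	    round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));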
3328 */ 3329 int 3330 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 3331 int flags) 3332 { 3333 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3334 int rv; 3335 bool holes_ok, need_wakeup, user_unwire; 3336 3337 if (start == end) 3338 return (KERN_SUCCESS); 3339 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3340 user_unwire = (flags & VM_MAP_WIRE_USER) != 0; 3341 vm_map_lock(map); 3342 VM_MAP_RANGE_CHECK(map, start, end); 3343 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3344 if (holes_ok) 3345 first_entry = vm_map_entry_succ(first_entry); 3346 else { 3347 vm_map_unlock(map); 3348 return (KERN_INVALID_ADDRESS); 3349 } 3350 } 3351 rv = KERN_SUCCESS; 3352 for (entry = first_entry; entry->start < end; entry = next_entry) { 3353 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3354 /* 3355 * We have not yet clipped the entry. 3356 */ 3357 next_entry = vm_map_entry_in_transition(map, start, 3358 &end, holes_ok, entry); 3359 if (next_entry == NULL) { 3360 if (entry == first_entry) { 3361 vm_map_unlock(map); 3362 return (KERN_INVALID_ADDRESS); 3363 } 3364 rv = KERN_INVALID_ADDRESS; 3365 break; 3366 } 3367 first_entry = (entry == first_entry) ? 3368 next_entry : NULL; 3369 continue; 3370 } 3371 rv = vm_map_clip_start(map, entry, start); 3372 if (rv != KERN_SUCCESS) 3373 break; 3374 rv = vm_map_clip_end(map, entry, end); 3375 if (rv != KERN_SUCCESS) 3376 break; 3377 3378 /* 3379 * Mark the entry in case the map lock is released. (See 3380 * above.) 3381 */ 3382 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3383 entry->wiring_thread == NULL, 3384 ("owned map entry %p", entry)); 3385 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3386 entry->wiring_thread = curthread; 3387 next_entry = vm_map_entry_succ(entry); 3388 /* 3389 * Check the map for holes in the specified region. 3390 * If holes_ok, skip this check. 3391 */ 3392 if (!holes_ok && 3393 entry->end < end && next_entry->start > entry->end) { 3394 end = entry->end; 3395 rv = KERN_INVALID_ADDRESS; 3396 break; 3397 } 3398 /* 3399 * If system unwiring, require that the entry is system wired. 3400 */ 3401 if (!user_unwire && 3402 vm_map_entry_system_wired_count(entry) == 0) { 3403 end = entry->end; 3404 rv = KERN_INVALID_ARGUMENT; 3405 break; 3406 } 3407 } 3408 need_wakeup = false; 3409 if (first_entry == NULL && 3410 !vm_map_lookup_entry(map, start, &first_entry)) { 3411 KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); 3412 prev_entry = first_entry; 3413 entry = vm_map_entry_succ(first_entry); 3414 } else { 3415 prev_entry = vm_map_entry_pred(first_entry); 3416 entry = first_entry; 3417 } 3418 for (; entry->start < end; 3419 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3420 /* 3421 * If holes_ok was specified, an empty 3422 * space in the unwired region could have been mapped 3423 * while the map lock was dropped for draining 3424 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 3425 * could be simultaneously wiring this new mapping 3426 * entry. Detect these cases and skip any entries 3427 * marked as in transition by us. 
3428 */ 3429 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3430 entry->wiring_thread != curthread) { 3431 KASSERT(holes_ok, 3432 ("vm_map_unwire: !HOLESOK and new/changed entry")); 3433 continue; 3434 } 3435 3436 if (rv == KERN_SUCCESS && (!user_unwire || 3437 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 3438 if (entry->wired_count == 1) 3439 vm_map_entry_unwire(map, entry); 3440 else 3441 entry->wired_count--; 3442 if (user_unwire) 3443 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3444 } 3445 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3446 ("vm_map_unwire: in-transition flag missing %p", entry)); 3447 KASSERT(entry->wiring_thread == curthread, 3448 ("vm_map_unwire: alien wire %p", entry)); 3449 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 3450 entry->wiring_thread = NULL; 3451 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3452 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3453 need_wakeup = true; 3454 } 3455 vm_map_try_merge_entries(map, prev_entry, entry); 3456 } 3457 vm_map_try_merge_entries(map, prev_entry, entry); 3458 vm_map_unlock(map); 3459 if (need_wakeup) 3460 vm_map_wakeup(map); 3461 return (rv); 3462 } 3463 3464 static void 3465 vm_map_wire_user_count_sub(u_long npages) 3466 { 3467 3468 atomic_subtract_long(&vm_user_wire_count, npages); 3469 } 3470 3471 static bool 3472 vm_map_wire_user_count_add(u_long npages) 3473 { 3474 u_long wired; 3475 3476 wired = vm_user_wire_count; 3477 do { 3478 if (npages + wired > vm_page_max_user_wired) 3479 return (false); 3480 } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired, 3481 npages + wired)); 3482 3483 return (true); 3484 } 3485 3486 /* 3487 * vm_map_wire_entry_failure: 3488 * 3489 * Handle a wiring failure on the given entry. 3490 * 3491 * The map should be locked. 3492 */ 3493 static void 3494 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 3495 vm_offset_t failed_addr) 3496 { 3497 3498 VM_MAP_ASSERT_LOCKED(map); 3499 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 3500 entry->wired_count == 1, 3501 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 3502 KASSERT(failed_addr < entry->end, 3503 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 3504 3505 /* 3506 * If any pages at the start of this entry were successfully wired, 3507 * then unwire them. 3508 */ 3509 if (failed_addr > entry->start) { 3510 pmap_unwire(map->pmap, entry->start, failed_addr); 3511 vm_object_unwire(entry->object.vm_object, entry->offset, 3512 failed_addr - entry->start, PQ_ACTIVE); 3513 } 3514 3515 /* 3516 * Assign an out-of-range value to represent the failure to wire this 3517 * entry. 3518 */ 3519 entry->wired_count = -1; 3520 } 3521 3522 int 3523 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3524 { 3525 int rv; 3526 3527 vm_map_lock(map); 3528 rv = vm_map_wire_locked(map, start, end, flags); 3529 vm_map_unlock(map); 3530 return (rv); 3531 } 3532 3533 /* 3534 * vm_map_wire_locked: 3535 * 3536 * Implements both kernel and user wiring. Returns with the map locked, 3537 * the map lock may be dropped. 
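 *
 * A minimal sketch of a call through the unlocked wrapper vm_map_wire()
 * above (illustrative only; "start" and "end" are caller-supplied, and
 * VM_MAP_WIRE_WRITE is optional since it additionally requires the
 * entries to permit write access):
 *
 *	int rv;
 *
 *	rv = vm_map_wire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_WRITE | VM_MAP_WIRE_HOLESOK);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));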
3538 */ 3539 int 3540 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3541 { 3542 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3543 vm_offset_t faddr, saved_end, saved_start; 3544 u_long incr, npages; 3545 u_int bidx, last_timestamp; 3546 int rv; 3547 bool holes_ok, need_wakeup, user_wire; 3548 vm_prot_t prot; 3549 3550 VM_MAP_ASSERT_LOCKED(map); 3551 3552 if (start == end) 3553 return (KERN_SUCCESS); 3554 prot = 0; 3555 if (flags & VM_MAP_WIRE_WRITE) 3556 prot |= VM_PROT_WRITE; 3557 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3558 user_wire = (flags & VM_MAP_WIRE_USER) != 0; 3559 VM_MAP_RANGE_CHECK(map, start, end); 3560 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3561 if (holes_ok) 3562 first_entry = vm_map_entry_succ(first_entry); 3563 else 3564 return (KERN_INVALID_ADDRESS); 3565 } 3566 for (entry = first_entry; entry->start < end; entry = next_entry) { 3567 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3568 /* 3569 * We have not yet clipped the entry. 3570 */ 3571 next_entry = vm_map_entry_in_transition(map, start, 3572 &end, holes_ok, entry); 3573 if (next_entry == NULL) { 3574 if (entry == first_entry) 3575 return (KERN_INVALID_ADDRESS); 3576 rv = KERN_INVALID_ADDRESS; 3577 goto done; 3578 } 3579 first_entry = (entry == first_entry) ? 3580 next_entry : NULL; 3581 continue; 3582 } 3583 rv = vm_map_clip_start(map, entry, start); 3584 if (rv != KERN_SUCCESS) 3585 goto done; 3586 rv = vm_map_clip_end(map, entry, end); 3587 if (rv != KERN_SUCCESS) 3588 goto done; 3589 3590 /* 3591 * Mark the entry in case the map lock is released. (See 3592 * above.) 3593 */ 3594 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3595 entry->wiring_thread == NULL, 3596 ("owned map entry %p", entry)); 3597 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3598 entry->wiring_thread = curthread; 3599 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3600 || (entry->protection & prot) != prot) { 3601 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3602 if (!holes_ok) { 3603 end = entry->end; 3604 rv = KERN_INVALID_ADDRESS; 3605 goto done; 3606 } 3607 } else if (entry->wired_count == 0) { 3608 entry->wired_count++; 3609 3610 npages = atop(entry->end - entry->start); 3611 if (user_wire && !vm_map_wire_user_count_add(npages)) { 3612 vm_map_wire_entry_failure(map, entry, 3613 entry->start); 3614 end = entry->end; 3615 rv = KERN_RESOURCE_SHORTAGE; 3616 goto done; 3617 } 3618 3619 /* 3620 * Release the map lock, relying on the in-transition 3621 * mark. Mark the map busy for fork. 3622 */ 3623 saved_start = entry->start; 3624 saved_end = entry->end; 3625 last_timestamp = map->timestamp; 3626 bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3627 incr = pagesizes[bidx]; 3628 vm_map_busy(map); 3629 vm_map_unlock(map); 3630 3631 for (faddr = saved_start; faddr < saved_end; 3632 faddr += incr) { 3633 /* 3634 * Simulate a fault to get the page and enter 3635 * it into the physical map. 3636 */ 3637 rv = vm_fault(map, faddr, VM_PROT_NONE, 3638 VM_FAULT_WIRE, NULL); 3639 if (rv != KERN_SUCCESS) 3640 break; 3641 } 3642 vm_map_lock(map); 3643 vm_map_unbusy(map); 3644 if (last_timestamp + 1 != map->timestamp) { 3645 /* 3646 * Look again for the entry because the map was 3647 * modified while it was unlocked. The entry 3648 * may have been clipped, but NOT merged or 3649 * deleted. 3650 */ 3651 if (!vm_map_lookup_entry(map, saved_start, 3652 &next_entry)) 3653 KASSERT(false, 3654 ("vm_map_wire: lookup failed")); 3655 first_entry = (entry == first_entry) ? 
3656 next_entry : NULL; 3657 for (entry = next_entry; entry->end < saved_end; 3658 entry = vm_map_entry_succ(entry)) { 3659 /* 3660 * In case of failure, handle entries 3661 * that were not fully wired here; 3662 * fully wired entries are handled 3663 * later. 3664 */ 3665 if (rv != KERN_SUCCESS && 3666 faddr < entry->end) 3667 vm_map_wire_entry_failure(map, 3668 entry, faddr); 3669 } 3670 } 3671 if (rv != KERN_SUCCESS) { 3672 vm_map_wire_entry_failure(map, entry, faddr); 3673 if (user_wire) 3674 vm_map_wire_user_count_sub(npages); 3675 end = entry->end; 3676 goto done; 3677 } 3678 } else if (!user_wire || 3679 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3680 entry->wired_count++; 3681 } 3682 /* 3683 * Check the map for holes in the specified region. 3684 * If holes_ok was specified, skip this check. 3685 */ 3686 next_entry = vm_map_entry_succ(entry); 3687 if (!holes_ok && 3688 entry->end < end && next_entry->start > entry->end) { 3689 end = entry->end; 3690 rv = KERN_INVALID_ADDRESS; 3691 goto done; 3692 } 3693 } 3694 rv = KERN_SUCCESS; 3695 done: 3696 need_wakeup = false; 3697 if (first_entry == NULL && 3698 !vm_map_lookup_entry(map, start, &first_entry)) { 3699 KASSERT(holes_ok, ("vm_map_wire: lookup failed")); 3700 prev_entry = first_entry; 3701 entry = vm_map_entry_succ(first_entry); 3702 } else { 3703 prev_entry = vm_map_entry_pred(first_entry); 3704 entry = first_entry; 3705 } 3706 for (; entry->start < end; 3707 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3708 /* 3709 * If holes_ok was specified, an empty 3710 * space in the unwired region could have been mapped 3711 * while the map lock was dropped for faulting in the 3712 * pages or draining MAP_ENTRY_IN_TRANSITION. 3713 * Moreover, another thread could be simultaneously 3714 * wiring this new mapping entry. Detect these cases 3715 * and skip any entries marked as in transition not by us. 3716 * 3717 * Another way to get an entry not marked with 3718 * MAP_ENTRY_IN_TRANSITION is after failed clipping, 3719 * which set rv to KERN_INVALID_ARGUMENT. 3720 */ 3721 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3722 entry->wiring_thread != curthread) { 3723 KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT, 3724 ("vm_map_wire: !HOLESOK and new/changed entry")); 3725 continue; 3726 } 3727 3728 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { 3729 /* do nothing */ 3730 } else if (rv == KERN_SUCCESS) { 3731 if (user_wire) 3732 entry->eflags |= MAP_ENTRY_USER_WIRED; 3733 } else if (entry->wired_count == -1) { 3734 /* 3735 * Wiring failed on this entry. Thus, unwiring is 3736 * unnecessary. 3737 */ 3738 entry->wired_count = 0; 3739 } else if (!user_wire || 3740 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3741 /* 3742 * Undo the wiring. Wiring succeeded on this entry 3743 * but failed on a later entry. 
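		 * Only this wiring pass is accounted for when
		 * wired_count is 1, so the entry can be fully unwired;
		 * a larger count means another (e.g. system) wiring
		 * remains and only the count is decremented.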
3744 */ 3745 if (entry->wired_count == 1) { 3746 vm_map_entry_unwire(map, entry); 3747 if (user_wire) 3748 vm_map_wire_user_count_sub( 3749 atop(entry->end - entry->start)); 3750 } else 3751 entry->wired_count--; 3752 } 3753 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3754 ("vm_map_wire: in-transition flag missing %p", entry)); 3755 KASSERT(entry->wiring_thread == curthread, 3756 ("vm_map_wire: alien wire %p", entry)); 3757 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3758 MAP_ENTRY_WIRE_SKIPPED); 3759 entry->wiring_thread = NULL; 3760 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3761 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3762 need_wakeup = true; 3763 } 3764 vm_map_try_merge_entries(map, prev_entry, entry); 3765 } 3766 vm_map_try_merge_entries(map, prev_entry, entry); 3767 if (need_wakeup) 3768 vm_map_wakeup(map); 3769 return (rv); 3770 } 3771 3772 /* 3773 * vm_map_sync 3774 * 3775 * Push any dirty cached pages in the address range to their pager. 3776 * If syncio is TRUE, dirty pages are written synchronously. 3777 * If invalidate is TRUE, any cached pages are freed as well. 3778 * 3779 * If the size of the region from start to end is zero, we are 3780 * supposed to flush all modified pages within the region containing 3781 * start. Unfortunately, a region can be split or coalesced with 3782 * neighboring regions, making it difficult to determine what the 3783 * original region was. Therefore, we approximate this requirement by 3784 * flushing the current region containing start. 3785 * 3786 * Returns an error if any part of the specified range is not mapped. 3787 */ 3788 int 3789 vm_map_sync( 3790 vm_map_t map, 3791 vm_offset_t start, 3792 vm_offset_t end, 3793 boolean_t syncio, 3794 boolean_t invalidate) 3795 { 3796 vm_map_entry_t entry, first_entry, next_entry; 3797 vm_size_t size; 3798 vm_object_t object; 3799 vm_ooffset_t offset; 3800 unsigned int last_timestamp; 3801 int bdry_idx; 3802 boolean_t failed; 3803 3804 vm_map_lock_read(map); 3805 VM_MAP_RANGE_CHECK(map, start, end); 3806 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3807 vm_map_unlock_read(map); 3808 return (KERN_INVALID_ADDRESS); 3809 } else if (start == end) { 3810 start = first_entry->start; 3811 end = first_entry->end; 3812 } 3813 3814 /* 3815 * Make a first pass to check for user-wired memory, holes, 3816 * and partial invalidation of largepage mappings. 3817 */ 3818 for (entry = first_entry; entry->start < end; entry = next_entry) { 3819 if (invalidate) { 3820 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { 3821 vm_map_unlock_read(map); 3822 return (KERN_INVALID_ARGUMENT); 3823 } 3824 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3825 if (bdry_idx != 0 && 3826 ((start & (pagesizes[bdry_idx] - 1)) != 0 || 3827 (end & (pagesizes[bdry_idx] - 1)) != 0)) { 3828 vm_map_unlock_read(map); 3829 return (KERN_INVALID_ARGUMENT); 3830 } 3831 } 3832 next_entry = vm_map_entry_succ(entry); 3833 if (end > entry->end && 3834 entry->end != next_entry->start) { 3835 vm_map_unlock_read(map); 3836 return (KERN_INVALID_ADDRESS); 3837 } 3838 } 3839 3840 if (invalidate) 3841 pmap_remove(map->pmap, start, end); 3842 failed = FALSE; 3843 3844 /* 3845 * Make a second pass, cleaning/uncaching pages from the indicated 3846 * objects as we go. 3847 */ 3848 for (entry = first_entry; entry->start < end;) { 3849 offset = entry->offset + (start - entry->start); 3850 size = (end <= entry->end ? 
end : entry->end) - start; 3851 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 3852 vm_map_t smap; 3853 vm_map_entry_t tentry; 3854 vm_size_t tsize; 3855 3856 smap = entry->object.sub_map; 3857 vm_map_lock_read(smap); 3858 (void) vm_map_lookup_entry(smap, offset, &tentry); 3859 tsize = tentry->end - offset; 3860 if (tsize < size) 3861 size = tsize; 3862 object = tentry->object.vm_object; 3863 offset = tentry->offset + (offset - tentry->start); 3864 vm_map_unlock_read(smap); 3865 } else { 3866 object = entry->object.vm_object; 3867 } 3868 vm_object_reference(object); 3869 last_timestamp = map->timestamp; 3870 vm_map_unlock_read(map); 3871 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 3872 failed = TRUE; 3873 start += size; 3874 vm_object_deallocate(object); 3875 vm_map_lock_read(map); 3876 if (last_timestamp == map->timestamp || 3877 !vm_map_lookup_entry(map, start, &entry)) 3878 entry = vm_map_entry_succ(entry); 3879 } 3880 3881 vm_map_unlock_read(map); 3882 return (failed ? KERN_FAILURE : KERN_SUCCESS); 3883 } 3884 3885 /* 3886 * vm_map_entry_unwire: [ internal use only ] 3887 * 3888 * Make the region specified by this entry pageable. 3889 * 3890 * The map in question should be locked. 3891 * [This is the reason for this routine's existence.] 3892 */ 3893 static void 3894 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3895 { 3896 vm_size_t size; 3897 3898 VM_MAP_ASSERT_LOCKED(map); 3899 KASSERT(entry->wired_count > 0, 3900 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 3901 3902 size = entry->end - entry->start; 3903 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) 3904 vm_map_wire_user_count_sub(atop(size)); 3905 pmap_unwire(map->pmap, entry->start, entry->end); 3906 vm_object_unwire(entry->object.vm_object, entry->offset, size, 3907 PQ_ACTIVE); 3908 entry->wired_count = 0; 3909 } 3910 3911 static void 3912 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 3913 { 3914 3915 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3916 vm_object_deallocate(entry->object.vm_object); 3917 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3918 } 3919 3920 /* 3921 * vm_map_entry_delete: [ internal use only ] 3922 * 3923 * Deallocate the given entry from the target map. 
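 *
 *	The map must be locked.  For user maps the entry itself is not
 *	freed here; it is queued on curthread->td_map_def_user and
 *	disposed of by vm_map_process_deferred() once the map lock has
 *	been dropped.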
3924 */ 3925 static void 3926 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3927 { 3928 vm_object_t object; 3929 vm_pindex_t offidxstart, offidxend, size1; 3930 vm_size_t size; 3931 3932 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3933 object = entry->object.vm_object; 3934 3935 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3936 MPASS(entry->cred == NULL); 3937 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3938 MPASS(object == NULL); 3939 vm_map_entry_deallocate(entry, map->system_map); 3940 return; 3941 } 3942 3943 size = entry->end - entry->start; 3944 map->size -= size; 3945 3946 if (entry->cred != NULL) { 3947 swap_release_by_cred(size, entry->cred); 3948 crfree(entry->cred); 3949 } 3950 3951 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { 3952 entry->object.vm_object = NULL; 3953 } else if ((object->flags & OBJ_ANON) != 0 || 3954 object == kernel_object) { 3955 KASSERT(entry->cred == NULL || object->cred == NULL || 3956 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3957 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3958 offidxstart = OFF_TO_IDX(entry->offset); 3959 offidxend = offidxstart + atop(size); 3960 VM_OBJECT_WLOCK(object); 3961 if (object->ref_count != 1 && 3962 ((object->flags & OBJ_ONEMAPPING) != 0 || 3963 object == kernel_object)) { 3964 vm_object_collapse(object); 3965 3966 /* 3967 * The option OBJPR_NOTMAPPED can be passed here 3968 * because vm_map_delete() already performed 3969 * pmap_remove() on the only mapping to this range 3970 * of pages. 3971 */ 3972 vm_object_page_remove(object, offidxstart, offidxend, 3973 OBJPR_NOTMAPPED); 3974 if (offidxend >= object->size && 3975 offidxstart < object->size) { 3976 size1 = object->size; 3977 object->size = offidxstart; 3978 if (object->cred != NULL) { 3979 size1 -= object->size; 3980 KASSERT(object->charge >= ptoa(size1), 3981 ("object %p charge < 0", object)); 3982 swap_release_by_cred(ptoa(size1), 3983 object->cred); 3984 object->charge -= ptoa(size1); 3985 } 3986 } 3987 } 3988 VM_OBJECT_WUNLOCK(object); 3989 } 3990 if (map->system_map) 3991 vm_map_entry_deallocate(entry, TRUE); 3992 else { 3993 entry->defer_next = curthread->td_map_def_user; 3994 curthread->td_map_def_user = entry; 3995 } 3996 } 3997 3998 /* 3999 * vm_map_delete: [ internal use only ] 4000 * 4001 * Deallocates the given address range from the target 4002 * map. 4003 */ 4004 int 4005 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 4006 { 4007 vm_map_entry_t entry, next_entry, scratch_entry; 4008 int rv; 4009 4010 VM_MAP_ASSERT_LOCKED(map); 4011 4012 if (start == end) 4013 return (KERN_SUCCESS); 4014 4015 /* 4016 * Find the start of the region, and clip it. 4017 * Step through all entries in this region. 4018 */ 4019 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry); 4020 if (rv != KERN_SUCCESS) 4021 return (rv); 4022 for (; entry->start < end; entry = next_entry) { 4023 /* 4024 * Wait for wiring or unwiring of an entry to complete. 4025 * Also wait for any system wirings to disappear on 4026 * user maps. 
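		 * The entry is marked MAP_ENTRY_NEEDS_WAKEUP and the map
		 * lock is dropped in vm_map_unlock_and_wait().  After
		 * waking up, the timestamp tells us whether the map was
		 * modified while unlocked; if so, the entry is looked up
		 * again from saved_start before the loop continues.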
4027 */ 4028 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 4029 (vm_map_pmap(map) != kernel_pmap && 4030 vm_map_entry_system_wired_count(entry) != 0)) { 4031 unsigned int last_timestamp; 4032 vm_offset_t saved_start; 4033 4034 saved_start = entry->start; 4035 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4036 last_timestamp = map->timestamp; 4037 (void) vm_map_unlock_and_wait(map, 0); 4038 vm_map_lock(map); 4039 if (last_timestamp + 1 != map->timestamp) { 4040 /* 4041 * Look again for the entry because the map was 4042 * modified while it was unlocked. 4043 * Specifically, the entry may have been 4044 * clipped, merged, or deleted. 4045 */ 4046 rv = vm_map_lookup_clip_start(map, saved_start, 4047 &next_entry, &scratch_entry); 4048 if (rv != KERN_SUCCESS) 4049 break; 4050 } else 4051 next_entry = entry; 4052 continue; 4053 } 4054 4055 /* XXXKIB or delete to the upper superpage boundary ? */ 4056 rv = vm_map_clip_end(map, entry, end); 4057 if (rv != KERN_SUCCESS) 4058 break; 4059 next_entry = vm_map_entry_succ(entry); 4060 4061 /* 4062 * Unwire before removing addresses from the pmap; otherwise, 4063 * unwiring will put the entries back in the pmap. 4064 */ 4065 if (entry->wired_count != 0) 4066 vm_map_entry_unwire(map, entry); 4067 4068 /* 4069 * Remove mappings for the pages, but only if the 4070 * mappings could exist. For instance, it does not 4071 * make sense to call pmap_remove() for guard entries. 4072 */ 4073 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 4074 entry->object.vm_object != NULL) 4075 pmap_map_delete(map->pmap, entry->start, entry->end); 4076 4077 /* 4078 * Delete the entry only after removing all pmap 4079 * entries pointing to its pages. (Otherwise, its 4080 * page frames may be reallocated, and any modify bits 4081 * will be set in the wrong object!) 4082 */ 4083 vm_map_entry_delete(map, entry); 4084 } 4085 return (rv); 4086 } 4087 4088 /* 4089 * vm_map_remove: 4090 * 4091 * Remove the given address range from the target map. 4092 * This is the exported form of vm_map_delete. 4093 */ 4094 int 4095 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 4096 { 4097 int result; 4098 4099 vm_map_lock(map); 4100 VM_MAP_RANGE_CHECK(map, start, end); 4101 result = vm_map_delete(map, start, end); 4102 vm_map_unlock(map); 4103 return (result); 4104 } 4105 4106 /* 4107 * vm_map_check_protection: 4108 * 4109 * Assert that the target map allows the specified privilege on the 4110 * entire address region given. The entire region must be allocated. 4111 * 4112 * WARNING! This code does not and should not check whether the 4113 * contents of the region is accessible. For example a smaller file 4114 * might be mapped into a larger address space. 4115 * 4116 * NOTE! This code is also called by munmap(). 4117 * 4118 * The map must be locked. A read lock is sufficient. 4119 */ 4120 boolean_t 4121 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 4122 vm_prot_t protection) 4123 { 4124 vm_map_entry_t entry; 4125 vm_map_entry_t tmp_entry; 4126 4127 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 4128 return (FALSE); 4129 entry = tmp_entry; 4130 4131 while (start < end) { 4132 /* 4133 * No holes allowed! 4134 */ 4135 if (start < entry->start) 4136 return (FALSE); 4137 /* 4138 * Check protection associated with entry. 
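		 * Every bit requested in "protection" must be present
		 * in the entry's current protection; max_protection is
		 * not consulted here.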
4139 */ 4140 if ((entry->protection & protection) != protection) 4141 return (FALSE); 4142 /* go to next entry */ 4143 start = entry->end; 4144 entry = vm_map_entry_succ(entry); 4145 } 4146 return (TRUE); 4147 } 4148 4149 /* 4150 * 4151 * vm_map_copy_swap_object: 4152 * 4153 * Copies a swap-backed object from an existing map entry to a 4154 * new one. Carries forward the swap charge. May change the 4155 * src object on return. 4156 */ 4157 static void 4158 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry, 4159 vm_offset_t size, vm_ooffset_t *fork_charge) 4160 { 4161 vm_object_t src_object; 4162 struct ucred *cred; 4163 int charged; 4164 4165 src_object = src_entry->object.vm_object; 4166 charged = ENTRY_CHARGED(src_entry); 4167 if ((src_object->flags & OBJ_ANON) != 0) { 4168 VM_OBJECT_WLOCK(src_object); 4169 vm_object_collapse(src_object); 4170 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { 4171 vm_object_split(src_entry); 4172 src_object = src_entry->object.vm_object; 4173 } 4174 vm_object_reference_locked(src_object); 4175 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 4176 VM_OBJECT_WUNLOCK(src_object); 4177 } else 4178 vm_object_reference(src_object); 4179 if (src_entry->cred != NULL && 4180 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4181 KASSERT(src_object->cred == NULL, 4182 ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p", 4183 src_object)); 4184 src_object->cred = src_entry->cred; 4185 src_object->charge = size; 4186 } 4187 dst_entry->object.vm_object = src_object; 4188 if (charged) { 4189 cred = curthread->td_ucred; 4190 crhold(cred); 4191 dst_entry->cred = cred; 4192 *fork_charge += size; 4193 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4194 crhold(cred); 4195 src_entry->cred = cred; 4196 *fork_charge += size; 4197 } 4198 } 4199 } 4200 4201 /* 4202 * vm_map_copy_entry: 4203 * 4204 * Copies the contents of the source entry to the destination 4205 * entry. The entries *must* be aligned properly. 4206 */ 4207 static void 4208 vm_map_copy_entry( 4209 vm_map_t src_map, 4210 vm_map_t dst_map, 4211 vm_map_entry_t src_entry, 4212 vm_map_entry_t dst_entry, 4213 vm_ooffset_t *fork_charge) 4214 { 4215 vm_object_t src_object; 4216 vm_map_entry_t fake_entry; 4217 vm_offset_t size; 4218 4219 VM_MAP_ASSERT_LOCKED(dst_map); 4220 4221 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 4222 return; 4223 4224 if (src_entry->wired_count == 0 || 4225 (src_entry->protection & VM_PROT_WRITE) == 0) { 4226 /* 4227 * If the source entry is marked needs_copy, it is already 4228 * write-protected. 4229 */ 4230 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 4231 (src_entry->protection & VM_PROT_WRITE) != 0) { 4232 pmap_protect(src_map->pmap, 4233 src_entry->start, 4234 src_entry->end, 4235 src_entry->protection & ~VM_PROT_WRITE); 4236 } 4237 4238 /* 4239 * Make a copy of the object. 4240 */ 4241 size = src_entry->end - src_entry->start; 4242 if ((src_object = src_entry->object.vm_object) != NULL) { 4243 if ((src_object->flags & OBJ_SWAP) != 0) { 4244 vm_map_copy_swap_object(src_entry, dst_entry, 4245 size, fork_charge); 4246 /* May have split/collapsed, reload obj. 
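				 * vm_map_copy_swap_object() may have
				 * replaced the entry's object via
				 * vm_object_split(), so refresh the local
				 * pointer before both entries are marked
				 * copy-on-write below.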
*/ 4247 src_object = src_entry->object.vm_object; 4248 } else { 4249 vm_object_reference(src_object); 4250 dst_entry->object.vm_object = src_object; 4251 } 4252 src_entry->eflags |= MAP_ENTRY_COW | 4253 MAP_ENTRY_NEEDS_COPY; 4254 dst_entry->eflags |= MAP_ENTRY_COW | 4255 MAP_ENTRY_NEEDS_COPY; 4256 dst_entry->offset = src_entry->offset; 4257 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { 4258 /* 4259 * MAP_ENTRY_WRITECNT cannot 4260 * indicate write reference from 4261 * src_entry, since the entry is 4262 * marked as needs copy. Allocate a 4263 * fake entry that is used to 4264 * decrement object->un_pager writecount 4265 * at the appropriate time. Attach 4266 * fake_entry to the deferred list. 4267 */ 4268 fake_entry = vm_map_entry_create(dst_map); 4269 fake_entry->eflags = MAP_ENTRY_WRITECNT; 4270 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; 4271 vm_object_reference(src_object); 4272 fake_entry->object.vm_object = src_object; 4273 fake_entry->start = src_entry->start; 4274 fake_entry->end = src_entry->end; 4275 fake_entry->defer_next = 4276 curthread->td_map_def_user; 4277 curthread->td_map_def_user = fake_entry; 4278 } 4279 4280 pmap_copy(dst_map->pmap, src_map->pmap, 4281 dst_entry->start, dst_entry->end - dst_entry->start, 4282 src_entry->start); 4283 } else { 4284 dst_entry->object.vm_object = NULL; 4285 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) 4286 dst_entry->offset = 0; 4287 if (src_entry->cred != NULL) { 4288 dst_entry->cred = curthread->td_ucred; 4289 crhold(dst_entry->cred); 4290 *fork_charge += size; 4291 } 4292 } 4293 } else { 4294 /* 4295 * We don't want to make writeable wired pages copy-on-write. 4296 * Immediately copy these pages into the new map by simulating 4297 * page faults. The new pages are pageable. 4298 */ 4299 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 4300 fork_charge); 4301 } 4302 } 4303 4304 /* 4305 * vmspace_map_entry_forked: 4306 * Update the newly-forked vmspace each time a map entry is inherited 4307 * or copied. The values for vm_dsize and vm_tsize are approximate 4308 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 4309 */ 4310 static void 4311 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 4312 vm_map_entry_t entry) 4313 { 4314 vm_size_t entrysize; 4315 vm_offset_t newend; 4316 4317 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 4318 return; 4319 entrysize = entry->end - entry->start; 4320 vm2->vm_map.size += entrysize; 4321 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 4322 vm2->vm_ssize += btoc(entrysize); 4323 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 4324 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 4325 newend = MIN(entry->end, 4326 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 4327 vm2->vm_dsize += btoc(newend - entry->start); 4328 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 4329 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 4330 newend = MIN(entry->end, 4331 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 4332 vm2->vm_tsize += btoc(newend - entry->start); 4333 } 4334 } 4335 4336 /* 4337 * vmspace_fork: 4338 * Create a new process vmspace structure and vm_map 4339 * based on those of an existing process. The new map 4340 * is based on the old map, according to the inheritance 4341 * values on the regions in that map. 4342 * 4343 * XXX It might be worth coalescing the entries added to the new vmspace. 4344 * 4345 * The source map must not be locked. 
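 *
 *	Typical usage, modelled on vmspace_unshare() below (an illustrative
 *	sketch; the fork path adds further bookkeeping):
 *
 *		fork_charge = 0;
 *		vm2 = vmspace_fork(p->p_vmspace, &fork_charge);
 *		if (vm2 == NULL)
 *			return (ENOMEM);
 *		if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
 *			vmspace_free(vm2);
 *			return (ENOMEM);
 *		}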
4346 */ 4347 struct vmspace * 4348 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 4349 { 4350 struct vmspace *vm2; 4351 vm_map_t new_map, old_map; 4352 vm_map_entry_t new_entry, old_entry; 4353 vm_object_t object; 4354 int error, locked __diagused; 4355 vm_inherit_t inh; 4356 4357 old_map = &vm1->vm_map; 4358 /* Copy immutable fields of vm1 to vm2. */ 4359 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 4360 pmap_pinit); 4361 if (vm2 == NULL) 4362 return (NULL); 4363 4364 vm2->vm_taddr = vm1->vm_taddr; 4365 vm2->vm_daddr = vm1->vm_daddr; 4366 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 4367 vm2->vm_stacktop = vm1->vm_stacktop; 4368 vm2->vm_shp_base = vm1->vm_shp_base; 4369 vm_map_lock(old_map); 4370 if (old_map->busy) 4371 vm_map_wait_busy(old_map); 4372 new_map = &vm2->vm_map; 4373 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 4374 KASSERT(locked, ("vmspace_fork: lock failed")); 4375 4376 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); 4377 if (error != 0) { 4378 sx_xunlock(&old_map->lock); 4379 sx_xunlock(&new_map->lock); 4380 vm_map_process_deferred(); 4381 vmspace_free(vm2); 4382 return (NULL); 4383 } 4384 4385 new_map->anon_loc = old_map->anon_loc; 4386 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | 4387 MAP_ASLR_STACK | MAP_WXORX); 4388 4389 VM_MAP_ENTRY_FOREACH(old_entry, old_map) { 4390 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 4391 panic("vm_map_fork: encountered a submap"); 4392 4393 inh = old_entry->inheritance; 4394 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 4395 inh != VM_INHERIT_NONE) 4396 inh = VM_INHERIT_COPY; 4397 4398 switch (inh) { 4399 case VM_INHERIT_NONE: 4400 break; 4401 4402 case VM_INHERIT_SHARE: 4403 /* 4404 * Clone the entry, creating the shared object if 4405 * necessary. 4406 */ 4407 object = old_entry->object.vm_object; 4408 if (object == NULL) { 4409 vm_map_entry_back(old_entry); 4410 object = old_entry->object.vm_object; 4411 } 4412 4413 /* 4414 * Add the reference before calling vm_object_shadow 4415 * to insure that a shadow object is created. 4416 */ 4417 vm_object_reference(object); 4418 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4419 vm_object_shadow(&old_entry->object.vm_object, 4420 &old_entry->offset, 4421 old_entry->end - old_entry->start, 4422 old_entry->cred, 4423 /* Transfer the second reference too. */ 4424 true); 4425 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4426 old_entry->cred = NULL; 4427 4428 /* 4429 * As in vm_map_merged_neighbor_dispose(), 4430 * the vnode lock will not be acquired in 4431 * this call to vm_object_deallocate(). 4432 */ 4433 vm_object_deallocate(object); 4434 object = old_entry->object.vm_object; 4435 } else { 4436 VM_OBJECT_WLOCK(object); 4437 vm_object_clear_flag(object, OBJ_ONEMAPPING); 4438 if (old_entry->cred != NULL) { 4439 KASSERT(object->cred == NULL, 4440 ("vmspace_fork both cred")); 4441 object->cred = old_entry->cred; 4442 object->charge = old_entry->end - 4443 old_entry->start; 4444 old_entry->cred = NULL; 4445 } 4446 4447 /* 4448 * Assert the correct state of the vnode 4449 * v_writecount while the object is locked, to 4450 * not relock it later for the assertion 4451 * correctness. 4452 */ 4453 if (old_entry->eflags & MAP_ENTRY_WRITECNT && 4454 object->type == OBJT_VNODE) { 4455 KASSERT(((struct vnode *)object-> 4456 handle)->v_writecount > 0, 4457 ("vmspace_fork: v_writecount %p", 4458 object)); 4459 KASSERT(object->un_pager.vnp. 
4460 writemappings > 0, 4461 ("vmspace_fork: vnp.writecount %p", 4462 object)); 4463 } 4464 VM_OBJECT_WUNLOCK(object); 4465 } 4466 4467 /* 4468 * Clone the entry, referencing the shared object. 4469 */ 4470 new_entry = vm_map_entry_create(new_map); 4471 *new_entry = *old_entry; 4472 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4473 MAP_ENTRY_IN_TRANSITION); 4474 new_entry->wiring_thread = NULL; 4475 new_entry->wired_count = 0; 4476 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { 4477 vm_pager_update_writecount(object, 4478 new_entry->start, new_entry->end); 4479 } 4480 vm_map_entry_set_vnode_text(new_entry, true); 4481 4482 /* 4483 * Insert the entry into the new map -- we know we're 4484 * inserting at the end of the new map. 4485 */ 4486 vm_map_entry_link(new_map, new_entry); 4487 vmspace_map_entry_forked(vm1, vm2, new_entry); 4488 4489 /* 4490 * Update the physical map 4491 */ 4492 pmap_copy(new_map->pmap, old_map->pmap, 4493 new_entry->start, 4494 (old_entry->end - old_entry->start), 4495 old_entry->start); 4496 break; 4497 4498 case VM_INHERIT_COPY: 4499 /* 4500 * Clone the entry and link into the map. 4501 */ 4502 new_entry = vm_map_entry_create(new_map); 4503 *new_entry = *old_entry; 4504 /* 4505 * Copied entry is COW over the old object. 4506 */ 4507 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4508 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT); 4509 new_entry->wiring_thread = NULL; 4510 new_entry->wired_count = 0; 4511 new_entry->object.vm_object = NULL; 4512 new_entry->cred = NULL; 4513 vm_map_entry_link(new_map, new_entry); 4514 vmspace_map_entry_forked(vm1, vm2, new_entry); 4515 vm_map_copy_entry(old_map, new_map, old_entry, 4516 new_entry, fork_charge); 4517 vm_map_entry_set_vnode_text(new_entry, true); 4518 break; 4519 4520 case VM_INHERIT_ZERO: 4521 /* 4522 * Create a new anonymous mapping entry modelled from 4523 * the old one. 4524 */ 4525 new_entry = vm_map_entry_create(new_map); 4526 memset(new_entry, 0, sizeof(*new_entry)); 4527 4528 new_entry->start = old_entry->start; 4529 new_entry->end = old_entry->end; 4530 new_entry->eflags = old_entry->eflags & 4531 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 4532 MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC | 4533 MAP_ENTRY_SPLIT_BOUNDARY_MASK); 4534 new_entry->protection = old_entry->protection; 4535 new_entry->max_protection = old_entry->max_protection; 4536 new_entry->inheritance = VM_INHERIT_ZERO; 4537 4538 vm_map_entry_link(new_map, new_entry); 4539 vmspace_map_entry_forked(vm1, vm2, new_entry); 4540 4541 new_entry->cred = curthread->td_ucred; 4542 crhold(new_entry->cred); 4543 *fork_charge += (new_entry->end - new_entry->start); 4544 4545 break; 4546 } 4547 } 4548 /* 4549 * Use inlined vm_map_unlock() to postpone handling the deferred 4550 * map entries, which cannot be done until both old_map and 4551 * new_map locks are released. 4552 */ 4553 sx_xunlock(&old_map->lock); 4554 sx_xunlock(&new_map->lock); 4555 vm_map_process_deferred(); 4556 4557 return (vm2); 4558 } 4559 4560 /* 4561 * Create a process's stack for exec_new_vmspace(). This function is never 4562 * asked to wire the newly created stack. 4563 */ 4564 int 4565 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4566 vm_prot_t prot, vm_prot_t max, int cow) 4567 { 4568 vm_size_t growsize, init_ssize; 4569 rlim_t vmemlim; 4570 int rv; 4571 4572 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4573 growsize = sgrowsiz; 4574 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 4575 vm_map_lock(map); 4576 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4577 /* If we would blow our VMEM resource limit, no go */ 4578 if (map->size + init_ssize > vmemlim) { 4579 rv = KERN_NO_SPACE; 4580 goto out; 4581 } 4582 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 4583 max, cow); 4584 out: 4585 vm_map_unlock(map); 4586 return (rv); 4587 } 4588 4589 static int stack_guard_page = 1; 4590 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 4591 &stack_guard_page, 0, 4592 "Specifies the number of guard pages for a stack that grows"); 4593 4594 static int 4595 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4596 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 4597 { 4598 vm_map_entry_t gap_entry, new_entry, prev_entry; 4599 vm_offset_t bot, gap_bot, gap_top, top; 4600 vm_size_t init_ssize, sgp; 4601 int orient, rv; 4602 4603 /* 4604 * The stack orientation is piggybacked with the cow argument. 4605 * Extract it into orient and mask the cow argument so that we 4606 * don't pass it around further. 4607 */ 4608 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 4609 KASSERT(orient != 0, ("No stack grow direction")); 4610 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 4611 ("bi-dir stack")); 4612 4613 if (max_ssize == 0 || 4614 !vm_map_range_valid(map, addrbos, addrbos + max_ssize)) 4615 return (KERN_INVALID_ADDRESS); 4616 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4617 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4618 (vm_size_t)stack_guard_page * PAGE_SIZE; 4619 if (sgp >= max_ssize) 4620 return (KERN_INVALID_ARGUMENT); 4621 4622 init_ssize = growsize; 4623 if (max_ssize < init_ssize + sgp) 4624 init_ssize = max_ssize - sgp; 4625 4626 /* If addr is already mapped, no go */ 4627 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 4628 return (KERN_NO_SPACE); 4629 4630 /* 4631 * If we can't accommodate max_ssize in the current mapping, no go. 4632 */ 4633 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) 4634 return (KERN_NO_SPACE); 4635 4636 /* 4637 * We initially map a stack of only init_ssize. We will grow as 4638 * needed later. Depending on the orientation of the stack (i.e. 4639 * the grow direction) we either map at the top of the range, the 4640 * bottom of the range or in the middle. 4641 * 4642 * Note: we would normally expect prot and max to be VM_PROT_ALL, 4643 * and cow to be 0. Possibly we should eliminate these as input 4644 * parameters, and just pass these values here in the insert call. 
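	 *
	 * For a MAP_STACK_GROWS_DOWN request the resulting layout is
	 * (illustrative sketch):
	 *
	 *	[addrbos ........... bot)   gap entry, VM_PROT_NONE guard
	 *	[bot ............... top)   stack entry, top = addrbos + max_ssize
	 *
	 * with bot = addrbos + max_ssize - init_ssize; the gap is
	 * consumed later by vm_map_growstack() as the stack grows down.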
4645 */ 4646 if (orient == MAP_STACK_GROWS_DOWN) { 4647 bot = addrbos + max_ssize - init_ssize; 4648 top = bot + init_ssize; 4649 gap_bot = addrbos; 4650 gap_top = bot; 4651 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 4652 bot = addrbos; 4653 top = bot + init_ssize; 4654 gap_bot = top; 4655 gap_top = addrbos + max_ssize; 4656 } 4657 rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow, 4658 &new_entry); 4659 if (rv != KERN_SUCCESS) 4660 return (rv); 4661 KASSERT(new_entry->end == top || new_entry->start == bot, 4662 ("Bad entry start/end for new stack entry")); 4663 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 4664 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 4665 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 4666 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 4667 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 4668 ("new entry lacks MAP_ENTRY_GROWS_UP")); 4669 if (gap_bot == gap_top) 4670 return (KERN_SUCCESS); 4671 rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 4672 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 4673 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry); 4674 if (rv == KERN_SUCCESS) { 4675 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, 4676 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); 4677 KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4678 MAP_ENTRY_STACK_GAP_UP)) != 0, 4679 ("entry %p not stack gap %#x", gap_entry, 4680 gap_entry->eflags)); 4681 4682 /* 4683 * Gap can never successfully handle a fault, so 4684 * read-ahead logic is never used for it. Re-use 4685 * next_read of the gap entry to store 4686 * stack_guard_page for vm_map_growstack(). 4687 * Similarly, since a gap cannot have a backing object, 4688 * store the original stack protections in the 4689 * object offset. 4690 */ 4691 gap_entry->next_read = sgp; 4692 gap_entry->offset = prot | PROT_MAX(max); 4693 } else { 4694 (void)vm_map_delete(map, bot, top); 4695 } 4696 return (rv); 4697 } 4698 4699 /* 4700 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 4701 * successfully grow the stack. 4702 */ 4703 static int 4704 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 4705 { 4706 vm_map_entry_t stack_entry; 4707 struct proc *p; 4708 struct vmspace *vm; 4709 struct ucred *cred; 4710 vm_offset_t gap_end, gap_start, grow_start; 4711 vm_size_t grow_amount, guard, max_grow, sgp; 4712 vm_prot_t prot, max; 4713 rlim_t lmemlim, stacklim, vmemlim; 4714 int rv, rv1 __diagused; 4715 bool gap_deleted, grow_down, is_procstack; 4716 #ifdef notyet 4717 uint64_t limit; 4718 #endif 4719 #ifdef RACCT 4720 int error __diagused; 4721 #endif 4722 4723 p = curproc; 4724 vm = p->p_vmspace; 4725 4726 /* 4727 * Disallow stack growth when the access is performed by a 4728 * debugger or AIO daemon. The reason is that the wrong 4729 * resource limits are applied. 4730 */ 4731 if (p != initproc && (map != &p->p_vmspace->vm_map || 4732 p->p_textvp == NULL)) 4733 return (KERN_FAILURE); 4734 4735 MPASS(!map->system_map); 4736 4737 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 4738 stacklim = lim_cur(curthread, RLIMIT_STACK); 4739 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4740 retry: 4741 /* If addr is not in a hole for a stack grow area, no need to grow. 
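	 * (gap_entry is NULL here when we came back through the retry
	 * label after a failed map lock upgrade, which forces the lookup
	 * to be redone.)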
*/ 4742 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 4743 return (KERN_FAILURE); 4744 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 4745 return (KERN_SUCCESS); 4746 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 4747 stack_entry = vm_map_entry_succ(gap_entry); 4748 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 4749 stack_entry->start != gap_entry->end) 4750 return (KERN_FAILURE); 4751 grow_amount = round_page(stack_entry->start - addr); 4752 grow_down = true; 4753 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 4754 stack_entry = vm_map_entry_pred(gap_entry); 4755 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 4756 stack_entry->end != gap_entry->start) 4757 return (KERN_FAILURE); 4758 grow_amount = round_page(addr + 1 - stack_entry->end); 4759 grow_down = false; 4760 } else { 4761 return (KERN_FAILURE); 4762 } 4763 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4764 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4765 gap_entry->next_read; 4766 max_grow = gap_entry->end - gap_entry->start; 4767 if (guard > max_grow) 4768 return (KERN_NO_SPACE); 4769 max_grow -= guard; 4770 if (grow_amount > max_grow) 4771 return (KERN_NO_SPACE); 4772 4773 /* 4774 * If this is the main process stack, see if we're over the stack 4775 * limit. 4776 */ 4777 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 4778 addr < (vm_offset_t)vm->vm_stacktop; 4779 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 4780 return (KERN_NO_SPACE); 4781 4782 #ifdef RACCT 4783 if (racct_enable) { 4784 PROC_LOCK(p); 4785 if (is_procstack && racct_set(p, RACCT_STACK, 4786 ctob(vm->vm_ssize) + grow_amount)) { 4787 PROC_UNLOCK(p); 4788 return (KERN_NO_SPACE); 4789 } 4790 PROC_UNLOCK(p); 4791 } 4792 #endif 4793 4794 grow_amount = roundup(grow_amount, sgrowsiz); 4795 if (grow_amount > max_grow) 4796 grow_amount = max_grow; 4797 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 4798 grow_amount = trunc_page((vm_size_t)stacklim) - 4799 ctob(vm->vm_ssize); 4800 } 4801 4802 #ifdef notyet 4803 PROC_LOCK(p); 4804 limit = racct_get_available(p, RACCT_STACK); 4805 PROC_UNLOCK(p); 4806 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 4807 grow_amount = limit - ctob(vm->vm_ssize); 4808 #endif 4809 4810 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 4811 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 4812 rv = KERN_NO_SPACE; 4813 goto out; 4814 } 4815 #ifdef RACCT 4816 if (racct_enable) { 4817 PROC_LOCK(p); 4818 if (racct_set(p, RACCT_MEMLOCK, 4819 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 4820 PROC_UNLOCK(p); 4821 rv = KERN_NO_SPACE; 4822 goto out; 4823 } 4824 PROC_UNLOCK(p); 4825 } 4826 #endif 4827 } 4828 4829 /* If we would blow our VMEM resource limit, no go */ 4830 if (map->size + grow_amount > vmemlim) { 4831 rv = KERN_NO_SPACE; 4832 goto out; 4833 } 4834 #ifdef RACCT 4835 if (racct_enable) { 4836 PROC_LOCK(p); 4837 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 4838 PROC_UNLOCK(p); 4839 rv = KERN_NO_SPACE; 4840 goto out; 4841 } 4842 PROC_UNLOCK(p); 4843 } 4844 #endif 4845 4846 if (vm_map_lock_upgrade(map)) { 4847 gap_entry = NULL; 4848 vm_map_lock_read(map); 4849 goto retry; 4850 } 4851 4852 if (grow_down) { 4853 /* 4854 * The gap_entry "offset" field is overloaded. See 4855 * vm_map_stack_locked(). 
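		 * The gap has no backing object, so "offset" holds the
		 * original stack protections packed as prot | PROT_MAX(max),
		 * and "next_read" holds the guard size; both were stored
		 * by vm_map_stack_locked().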
4856 */ 4857 prot = PROT_EXTRACT(gap_entry->offset); 4858 max = PROT_MAX_EXTRACT(gap_entry->offset); 4859 sgp = gap_entry->next_read; 4860 4861 grow_start = gap_entry->end - grow_amount; 4862 if (gap_entry->start + grow_amount == gap_entry->end) { 4863 gap_start = gap_entry->start; 4864 gap_end = gap_entry->end; 4865 vm_map_entry_delete(map, gap_entry); 4866 gap_deleted = true; 4867 } else { 4868 MPASS(gap_entry->start < gap_entry->end - grow_amount); 4869 vm_map_entry_resize(map, gap_entry, -grow_amount); 4870 gap_deleted = false; 4871 } 4872 rv = vm_map_insert(map, NULL, 0, grow_start, 4873 grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN); 4874 if (rv != KERN_SUCCESS) { 4875 if (gap_deleted) { 4876 rv1 = vm_map_insert1(map, NULL, 0, gap_start, 4877 gap_end, VM_PROT_NONE, VM_PROT_NONE, 4878 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN, 4879 &gap_entry); 4880 MPASS(rv1 == KERN_SUCCESS); 4881 gap_entry->next_read = sgp; 4882 gap_entry->offset = prot | PROT_MAX(max); 4883 } else 4884 vm_map_entry_resize(map, gap_entry, 4885 grow_amount); 4886 } 4887 } else { 4888 grow_start = stack_entry->end; 4889 cred = stack_entry->cred; 4890 if (cred == NULL && stack_entry->object.vm_object != NULL) 4891 cred = stack_entry->object.vm_object->cred; 4892 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 4893 rv = KERN_NO_SPACE; 4894 /* Grow the underlying object if applicable. */ 4895 else if (stack_entry->object.vm_object == NULL || 4896 vm_object_coalesce(stack_entry->object.vm_object, 4897 stack_entry->offset, 4898 (vm_size_t)(stack_entry->end - stack_entry->start), 4899 grow_amount, cred != NULL)) { 4900 if (gap_entry->start + grow_amount == gap_entry->end) { 4901 vm_map_entry_delete(map, gap_entry); 4902 vm_map_entry_resize(map, stack_entry, 4903 grow_amount); 4904 } else { 4905 gap_entry->start += grow_amount; 4906 stack_entry->end += grow_amount; 4907 } 4908 map->size += grow_amount; 4909 rv = KERN_SUCCESS; 4910 } else 4911 rv = KERN_FAILURE; 4912 } 4913 if (rv == KERN_SUCCESS && is_procstack) 4914 vm->vm_ssize += btoc(grow_amount); 4915 4916 /* 4917 * Heed the MAP_WIREFUTURE flag if it was set for this process. 4918 */ 4919 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { 4920 rv = vm_map_wire_locked(map, grow_start, 4921 grow_start + grow_amount, 4922 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 4923 } 4924 vm_map_lock_downgrade(map); 4925 4926 out: 4927 #ifdef RACCT 4928 if (racct_enable && rv != KERN_SUCCESS) { 4929 PROC_LOCK(p); 4930 error = racct_set(p, RACCT_VMEM, map->size); 4931 KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); 4932 if (!old_mlock) { 4933 error = racct_set(p, RACCT_MEMLOCK, 4934 ptoa(pmap_wired_count(map->pmap))); 4935 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed")); 4936 } 4937 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); 4938 KASSERT(error == 0, ("decreasing RACCT_STACK failed")); 4939 PROC_UNLOCK(p); 4940 } 4941 #endif 4942 4943 return (rv); 4944 } 4945 4946 /* 4947 * Unshare the specified VM space for exec. If other processes are 4948 * mapped to it, then create a new one. The new vmspace is null. 
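 *
 *	Note that the old vmspace is not released here: this function only
 *	installs the new one and marks the thread with TDP_EXECVMSPC, and
 *	the exec path is expected to drop the old reference later, once it
 *	is safe to do so.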
4949 */ 4950 int 4951 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 4952 { 4953 struct vmspace *oldvmspace = p->p_vmspace; 4954 struct vmspace *newvmspace; 4955 4956 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, 4957 ("vmspace_exec recursed")); 4958 newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit); 4959 if (newvmspace == NULL) 4960 return (ENOMEM); 4961 newvmspace->vm_swrss = oldvmspace->vm_swrss; 4962 /* 4963 * This code is written like this for prototype purposes. The 4964 * goal is to avoid running down the vmspace here, but let the 4965 * other process's that are still using the vmspace to finally 4966 * run it down. Even though there is little or no chance of blocking 4967 * here, it is a good idea to keep this form for future mods. 4968 */ 4969 PROC_VMSPACE_LOCK(p); 4970 p->p_vmspace = newvmspace; 4971 PROC_VMSPACE_UNLOCK(p); 4972 if (p == curthread->td_proc) 4973 pmap_activate(curthread); 4974 curthread->td_pflags |= TDP_EXECVMSPC; 4975 return (0); 4976 } 4977 4978 /* 4979 * Unshare the specified VM space for forcing COW. This 4980 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4981 */ 4982 int 4983 vmspace_unshare(struct proc *p) 4984 { 4985 struct vmspace *oldvmspace = p->p_vmspace; 4986 struct vmspace *newvmspace; 4987 vm_ooffset_t fork_charge; 4988 4989 /* 4990 * The caller is responsible for ensuring that the reference count 4991 * cannot concurrently transition 1 -> 2. 4992 */ 4993 if (refcount_load(&oldvmspace->vm_refcnt) == 1) 4994 return (0); 4995 fork_charge = 0; 4996 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4997 if (newvmspace == NULL) 4998 return (ENOMEM); 4999 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 5000 vmspace_free(newvmspace); 5001 return (ENOMEM); 5002 } 5003 PROC_VMSPACE_LOCK(p); 5004 p->p_vmspace = newvmspace; 5005 PROC_VMSPACE_UNLOCK(p); 5006 if (p == curthread->td_proc) 5007 pmap_activate(curthread); 5008 vmspace_free(oldvmspace); 5009 return (0); 5010 } 5011 5012 /* 5013 * vm_map_lookup: 5014 * 5015 * Finds the VM object, offset, and 5016 * protection for a given virtual address in the 5017 * specified map, assuming a page fault of the 5018 * type specified. 5019 * 5020 * Leaves the map in question locked for read; return 5021 * values are guaranteed until a vm_map_lookup_done 5022 * call is performed. Note that the map argument 5023 * is in/out; the returned map must be used in 5024 * the call to vm_map_lookup_done. 5025 * 5026 * A handle (out_entry) is returned for use in 5027 * vm_map_lookup_done, to make that fast. 5028 * 5029 * If a lookup is requested with "write protection" 5030 * specified, the map may be changed to perform virtual 5031 * copying operations, although the data referenced will 5032 * remain the same. 5033 */ 5034 int 5035 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 5036 vm_offset_t vaddr, 5037 vm_prot_t fault_typea, 5038 vm_map_entry_t *out_entry, /* OUT */ 5039 vm_object_t *object, /* OUT */ 5040 vm_pindex_t *pindex, /* OUT */ 5041 vm_prot_t *out_prot, /* OUT */ 5042 boolean_t *wired) /* OUT */ 5043 { 5044 vm_map_entry_t entry; 5045 vm_map_t map = *var_map; 5046 vm_prot_t prot; 5047 vm_prot_t fault_type; 5048 vm_object_t eobject; 5049 vm_size_t size; 5050 struct ucred *cred; 5051 5052 RetryLookup: 5053 5054 vm_map_lock_read(map); 5055 5056 RetryLookupLocked: 5057 /* 5058 * Lookup the faulting address. 
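	 * On failure the read lock is released before returning, so the
	 * caller must not call vm_map_lookup_done() for a
	 * KERN_INVALID_ADDRESS result.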
5059 */ 5060 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 5061 vm_map_unlock_read(map); 5062 return (KERN_INVALID_ADDRESS); 5063 } 5064 5065 entry = *out_entry; 5066 5067 /* 5068 * Handle submaps. 5069 */ 5070 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5071 vm_map_t old_map = map; 5072 5073 *var_map = map = entry->object.sub_map; 5074 vm_map_unlock_read(old_map); 5075 goto RetryLookup; 5076 } 5077 5078 /* 5079 * Check whether this task is allowed to have this page. 5080 */ 5081 prot = entry->protection; 5082 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 5083 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 5084 if (prot == VM_PROT_NONE && map != kernel_map && 5085 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 5086 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 5087 MAP_ENTRY_STACK_GAP_UP)) != 0 && 5088 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 5089 goto RetryLookupLocked; 5090 } 5091 fault_type = fault_typea & VM_PROT_ALL; 5092 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 5093 vm_map_unlock_read(map); 5094 return (KERN_PROTECTION_FAILURE); 5095 } 5096 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 5097 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 5098 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 5099 ("entry %p flags %x", entry, entry->eflags)); 5100 if ((fault_typea & VM_PROT_COPY) != 0 && 5101 (entry->max_protection & VM_PROT_WRITE) == 0 && 5102 (entry->eflags & MAP_ENTRY_COW) == 0) { 5103 vm_map_unlock_read(map); 5104 return (KERN_PROTECTION_FAILURE); 5105 } 5106 5107 /* 5108 * If this page is not pageable, we have to get it for all possible 5109 * accesses. 5110 */ 5111 *wired = (entry->wired_count != 0); 5112 if (*wired) 5113 fault_type = entry->protection; 5114 size = entry->end - entry->start; 5115 5116 /* 5117 * If the entry was copy-on-write, we either ... 5118 */ 5119 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5120 /* 5121 * If we want to write the page, we may as well handle that 5122 * now since we've got the map locked. 5123 * 5124 * If we don't need to write the page, we just demote the 5125 * permissions allowed. 5126 */ 5127 if ((fault_type & VM_PROT_WRITE) != 0 || 5128 (fault_typea & VM_PROT_COPY) != 0) { 5129 /* 5130 * Make a new object, and place it in the object 5131 * chain. Note that no new references have appeared 5132 * -- one just moved from the map to the new 5133 * object. 5134 */ 5135 if (vm_map_lock_upgrade(map)) 5136 goto RetryLookup; 5137 5138 if (entry->cred == NULL) { 5139 /* 5140 * The debugger owner is charged for 5141 * the memory. 5142 */ 5143 cred = curthread->td_ucred; 5144 crhold(cred); 5145 if (!swap_reserve_by_cred(size, cred)) { 5146 crfree(cred); 5147 vm_map_unlock(map); 5148 return (KERN_RESOURCE_SHORTAGE); 5149 } 5150 entry->cred = cred; 5151 } 5152 eobject = entry->object.vm_object; 5153 vm_object_shadow(&entry->object.vm_object, 5154 &entry->offset, size, entry->cred, false); 5155 if (eobject == entry->object.vm_object) { 5156 /* 5157 * The object was not shadowed. 5158 */ 5159 swap_release_by_cred(size, entry->cred); 5160 crfree(entry->cred); 5161 } 5162 entry->cred = NULL; 5163 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 5164 5165 vm_map_lock_downgrade(map); 5166 } else { 5167 /* 5168 * We're attempting to read a copy-on-write page -- 5169 * don't allow writes. 5170 */ 5171 prot &= ~VM_PROT_WRITE; 5172 } 5173 } 5174 5175 /* 5176 * Create an object if necessary. 
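	 * Anonymous entries may have been created without a backing
	 * object; the first lookup that needs one allocates it here,
	 * transferring any charge from entry->cred, after upgrading to
	 * the write lock.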
5177 */ 5178 if (entry->object.vm_object == NULL && !map->system_map) { 5179 if (vm_map_lock_upgrade(map)) 5180 goto RetryLookup; 5181 entry->object.vm_object = vm_object_allocate_anon(atop(size), 5182 NULL, entry->cred, size); 5183 entry->offset = 0; 5184 entry->cred = NULL; 5185 vm_map_lock_downgrade(map); 5186 } 5187 5188 /* 5189 * Return the object/offset from this entry. If the entry was 5190 * copy-on-write or empty, it has been fixed up. 5191 */ 5192 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5193 *object = entry->object.vm_object; 5194 5195 *out_prot = prot; 5196 return (KERN_SUCCESS); 5197 } 5198 5199 /* 5200 * vm_map_lookup_locked: 5201 * 5202 * Lookup the faulting address. A version of vm_map_lookup that returns 5203 * KERN_FAILURE instead of blocking on map lock or memory allocation. 5204 */ 5205 int 5206 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 5207 vm_offset_t vaddr, 5208 vm_prot_t fault_typea, 5209 vm_map_entry_t *out_entry, /* OUT */ 5210 vm_object_t *object, /* OUT */ 5211 vm_pindex_t *pindex, /* OUT */ 5212 vm_prot_t *out_prot, /* OUT */ 5213 boolean_t *wired) /* OUT */ 5214 { 5215 vm_map_entry_t entry; 5216 vm_map_t map = *var_map; 5217 vm_prot_t prot; 5218 vm_prot_t fault_type = fault_typea; 5219 5220 /* 5221 * Lookup the faulting address. 5222 */ 5223 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 5224 return (KERN_INVALID_ADDRESS); 5225 5226 entry = *out_entry; 5227 5228 /* 5229 * Fail if the entry refers to a submap. 5230 */ 5231 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 5232 return (KERN_FAILURE); 5233 5234 /* 5235 * Check whether this task is allowed to have this page. 5236 */ 5237 prot = entry->protection; 5238 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 5239 if ((fault_type & prot) != fault_type) 5240 return (KERN_PROTECTION_FAILURE); 5241 5242 /* 5243 * If this page is not pageable, we have to get it for all possible 5244 * accesses. 5245 */ 5246 *wired = (entry->wired_count != 0); 5247 if (*wired) 5248 fault_type = entry->protection; 5249 5250 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5251 /* 5252 * Fail if the entry was copy-on-write for a write fault. 5253 */ 5254 if (fault_type & VM_PROT_WRITE) 5255 return (KERN_FAILURE); 5256 /* 5257 * We're attempting to read a copy-on-write page -- 5258 * don't allow writes. 5259 */ 5260 prot &= ~VM_PROT_WRITE; 5261 } 5262 5263 /* 5264 * Fail if an object should be created. 5265 */ 5266 if (entry->object.vm_object == NULL && !map->system_map) 5267 return (KERN_FAILURE); 5268 5269 /* 5270 * Return the object/offset from this entry. If the entry was 5271 * copy-on-write or empty, it has been fixed up. 5272 */ 5273 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5274 *object = entry->object.vm_object; 5275 5276 *out_prot = prot; 5277 return (KERN_SUCCESS); 5278 } 5279 5280 /* 5281 * vm_map_lookup_done: 5282 * 5283 * Releases locks acquired by a vm_map_lookup 5284 * (according to the handle returned by that lookup). 
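 *
 *	A minimal sketch of the pairing (illustrative only; the page
 *	fault handler is the main real caller):
 *
 *		rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *		    &object, &pindex, &prot, &wired);
 *		if (rv != KERN_SUCCESS)
 *			return (rv);
 *		... use object and pindex under the read-locked map ...
 *		vm_map_lookup_done(map, entry);
 *
 *	Note that "map" may have been replaced by a submap through the
 *	in/out argument, which is why the value returned by
 *	vm_map_lookup() must be the one passed here.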
5285 */ 5286 void 5287 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 5288 { 5289 /* 5290 * Unlock the main-level map 5291 */ 5292 vm_map_unlock_read(map); 5293 } 5294 5295 vm_offset_t 5296 vm_map_max_KBI(const struct vm_map *map) 5297 { 5298 5299 return (vm_map_max(map)); 5300 } 5301 5302 vm_offset_t 5303 vm_map_min_KBI(const struct vm_map *map) 5304 { 5305 5306 return (vm_map_min(map)); 5307 } 5308 5309 pmap_t 5310 vm_map_pmap_KBI(vm_map_t map) 5311 { 5312 5313 return (map->pmap); 5314 } 5315 5316 bool 5317 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end) 5318 { 5319 5320 return (vm_map_range_valid(map, start, end)); 5321 } 5322 5323 #ifdef INVARIANTS 5324 static void 5325 _vm_map_assert_consistent(vm_map_t map, int check) 5326 { 5327 vm_map_entry_t entry, prev; 5328 vm_map_entry_t cur, header, lbound, ubound; 5329 vm_size_t max_left, max_right; 5330 5331 #ifdef DIAGNOSTIC 5332 ++map->nupdates; 5333 #endif 5334 if (enable_vmmap_check != check) 5335 return; 5336 5337 header = prev = &map->header; 5338 VM_MAP_ENTRY_FOREACH(entry, map) { 5339 KASSERT(prev->end <= entry->start, 5340 ("map %p prev->end = %jx, start = %jx", map, 5341 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5342 KASSERT(entry->start < entry->end, 5343 ("map %p start = %jx, end = %jx", map, 5344 (uintmax_t)entry->start, (uintmax_t)entry->end)); 5345 KASSERT(entry->left == header || 5346 entry->left->start < entry->start, 5347 ("map %p left->start = %jx, start = %jx", map, 5348 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); 5349 KASSERT(entry->right == header || 5350 entry->start < entry->right->start, 5351 ("map %p start = %jx, right->start = %jx", map, 5352 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); 5353 cur = map->root; 5354 lbound = ubound = header; 5355 for (;;) { 5356 if (entry->start < cur->start) { 5357 ubound = cur; 5358 cur = cur->left; 5359 KASSERT(cur != lbound, 5360 ("map %p cannot find %jx", 5361 map, (uintmax_t)entry->start)); 5362 } else if (cur->end <= entry->start) { 5363 lbound = cur; 5364 cur = cur->right; 5365 KASSERT(cur != ubound, 5366 ("map %p cannot find %jx", 5367 map, (uintmax_t)entry->start)); 5368 } else { 5369 KASSERT(cur == entry, 5370 ("map %p cannot find %jx", 5371 map, (uintmax_t)entry->start)); 5372 break; 5373 } 5374 } 5375 max_left = vm_map_entry_max_free_left(entry, lbound); 5376 max_right = vm_map_entry_max_free_right(entry, ubound); 5377 KASSERT(entry->max_free == vm_size_max(max_left, max_right), 5378 ("map %p max = %jx, max_left = %jx, max_right = %jx", map, 5379 (uintmax_t)entry->max_free, 5380 (uintmax_t)max_left, (uintmax_t)max_right)); 5381 prev = entry; 5382 } 5383 KASSERT(prev->end <= entry->start, 5384 ("map %p prev->end = %jx, start = %jx", map, 5385 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5386 } 5387 #endif 5388 5389 #include "opt_ddb.h" 5390 #ifdef DDB 5391 #include <sys/kernel.h> 5392 5393 #include <ddb/ddb.h> 5394 5395 static void 5396 vm_map_print(vm_map_t map) 5397 { 5398 vm_map_entry_t entry, prev; 5399 5400 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 5401 (void *)map, 5402 (void *)map->pmap, map->nentries, map->timestamp); 5403 5404 db_indent += 2; 5405 prev = &map->header; 5406 VM_MAP_ENTRY_FOREACH(entry, map) { 5407 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 5408 (void *)entry, (void *)entry->start, (void *)entry->end, 5409 entry->eflags); 5410 { 5411 static const char * const inheritance_name[4] = 5412 {"share", "copy", "none", "donate_copy"}; 5413 5414 
db_iprintf(" prot=%x/%x/%s", 5415 entry->protection, 5416 entry->max_protection, 5417 inheritance_name[(int)(unsigned char) 5418 entry->inheritance]); 5419 if (entry->wired_count != 0) 5420 db_printf(", wired"); 5421 } 5422 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5423 db_printf(", share=%p, offset=0x%jx\n", 5424 (void *)entry->object.sub_map, 5425 (uintmax_t)entry->offset); 5426 if (prev == &map->header || 5427 prev->object.sub_map != 5428 entry->object.sub_map) { 5429 db_indent += 2; 5430 vm_map_print((vm_map_t)entry->object.sub_map); 5431 db_indent -= 2; 5432 } 5433 } else { 5434 if (entry->cred != NULL) 5435 db_printf(", ruid %d", entry->cred->cr_ruid); 5436 db_printf(", object=%p, offset=0x%jx", 5437 (void *)entry->object.vm_object, 5438 (uintmax_t)entry->offset); 5439 if (entry->object.vm_object && entry->object.vm_object->cred) 5440 db_printf(", obj ruid %d charge %jx", 5441 entry->object.vm_object->cred->cr_ruid, 5442 (uintmax_t)entry->object.vm_object->charge); 5443 if (entry->eflags & MAP_ENTRY_COW) 5444 db_printf(", copy (%s)", 5445 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 5446 db_printf("\n"); 5447 5448 if (prev == &map->header || 5449 prev->object.vm_object != 5450 entry->object.vm_object) { 5451 db_indent += 2; 5452 vm_object_print((db_expr_t)(intptr_t) 5453 entry->object.vm_object, 5454 0, 0, (char *)0); 5455 db_indent -= 2; 5456 } 5457 } 5458 prev = entry; 5459 } 5460 db_indent -= 2; 5461 } 5462 5463 DB_SHOW_COMMAND(map, map) 5464 { 5465 5466 if (!have_addr) { 5467 db_printf("usage: show map <addr>\n"); 5468 return; 5469 } 5470 vm_map_print((vm_map_t)addr); 5471 } 5472 5473 DB_SHOW_COMMAND(procvm, procvm) 5474 { 5475 struct proc *p; 5476 5477 if (have_addr) { 5478 p = db_lookup_proc(addr); 5479 } else { 5480 p = curproc; 5481 } 5482 5483 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 5484 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 5485 (void *)vmspace_pmap(p->p_vmspace)); 5486 5487 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 5488 } 5489 5490 #endif /* DDB */ 5491