1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * 35 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 39 * 40 * Permission to use, copy, modify and distribute this software and 41 * its documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49 * 50 * Carnegie Mellon requests users of this software to return to 51 * 52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53 * School of Computer Science 54 * Carnegie Mellon University 55 * Pittsburgh PA 15213-3890 56 * 57 * any improvements or extensions that they make and grant Carnegie the 58 * rights to redistribute these changes. 59 */ 60 61 /* 62 * Virtual memory mapping module. 
63 */ 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/elf.h> 68 #include <sys/kernel.h> 69 #include <sys/ktr.h> 70 #include <sys/lock.h> 71 #include <sys/mutex.h> 72 #include <sys/proc.h> 73 #include <sys/vmmeter.h> 74 #include <sys/mman.h> 75 #include <sys/vnode.h> 76 #include <sys/racct.h> 77 #include <sys/resourcevar.h> 78 #include <sys/rwlock.h> 79 #include <sys/file.h> 80 #include <sys/sysctl.h> 81 #include <sys/sysent.h> 82 #include <sys/shm.h> 83 84 #include <vm/vm.h> 85 #include <vm/vm_param.h> 86 #include <vm/pmap.h> 87 #include <vm/vm_map.h> 88 #include <vm/vm_page.h> 89 #include <vm/vm_pageout.h> 90 #include <vm/vm_object.h> 91 #include <vm/vm_pager.h> 92 #include <vm/vm_kern.h> 93 #include <vm/vm_extern.h> 94 #include <vm/vnode_pager.h> 95 #include <vm/swap_pager.h> 96 #include <vm/uma.h> 97 98 /* 99 * Virtual memory maps provide for the mapping, protection, 100 * and sharing of virtual memory objects. In addition, 101 * this module provides for an efficient virtual copy of 102 * memory from one map to another. 103 * 104 * Synchronization is required prior to most operations. 105 * 106 * Maps consist of an ordered doubly-linked list of simple 107 * entries; a self-adjusting binary search tree of these 108 * entries is used to speed up lookups. 109 * 110 * Since portions of maps are specified by start/end addresses, 111 * which may not align with existing map entries, all 112 * routines merely "clip" entries to these start/end values. 113 * [That is, an entry is split into two, bordering at a 114 * start or end value.] Note that these clippings may not 115 * always be necessary (as the two resulting entries are then 116 * not changed); however, the clipping is done for convenience. 117 * 118 * As mentioned above, virtual copy operations are performed 119 * by copying VM object references from one map to 120 * another, and then marking both regions as copy-on-write. 121 */ 122 123 static struct mtx map_sleep_mtx; 124 static uma_zone_t mapentzone; 125 static uma_zone_t kmapentzone; 126 static uma_zone_t vmspace_zone; 127 static int vmspace_zinit(void *mem, int size, int flags); 128 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, 129 vm_offset_t max); 130 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); 131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); 132 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); 133 static int vm_map_growstack(vm_map_t map, vm_offset_t addr, 134 vm_map_entry_t gap_entry); 135 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 136 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); 137 #ifdef INVARIANTS 138 static void vmspace_zdtor(void *mem, int size, void *arg); 139 #endif 140 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, 141 vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, 142 int cow); 143 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 144 vm_offset_t failed_addr); 145 146 #define CONTAINS_BITS(set, bits) ((~(set) & (bits)) == 0) 147 148 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \ 149 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \ 150 !((e)->eflags & MAP_ENTRY_NEEDS_COPY))) 151 152 /* 153 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type 154 * stable. 
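 *
 * Type stability comes from vmspace_zone being created with UMA_ZONE_NOFREE
 * in vm_map_startup() below: the memory backing a struct vmspace is never
 * returned to the system, so a racing reader may observe a stale vmspace but
 * never a dangling pointer into unmapped memory.  A hedged sketch of the
 * pattern this permits (the helper name is illustrative, not part of this
 * file):
 *
 *	struct vmspace *
 *	example_peek_vmspace(struct proc *p)
 *	{
 *		struct vmspace *vm;
 *
 *		PROC_VMSPACE_LOCK(p);		// currently a no-op
 *		vm = p->p_vmspace;		// may be stale, never dangling
 *		PROC_VMSPACE_UNLOCK(p);
 *		return (vm);	// caller still needs vmspace_acquire_ref()
 *	}
 *
 * If vmspaces ever stopped being type stable, these macros would have to
 * become real locks so that vmspace_acquire_ref() below could not
 * dereference freed memory.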
155 */ 156 #define PROC_VMSPACE_LOCK(p) do { } while (0) 157 #define PROC_VMSPACE_UNLOCK(p) do { } while (0) 158 159 /* 160 * VM_MAP_RANGE_CHECK: [ internal use only ] 161 * 162 * Asserts that the starting and ending region 163 * addresses fall within the valid range of the map. 164 */ 165 #define VM_MAP_RANGE_CHECK(map, start, end) \ 166 { \ 167 if (start < vm_map_min(map)) \ 168 start = vm_map_min(map); \ 169 if (end > vm_map_max(map)) \ 170 end = vm_map_max(map); \ 171 if (start > end) \ 172 start = end; \ 173 } 174 175 #ifndef UMA_USE_DMAP 176 177 /* 178 * Allocate a new slab for kernel map entries. The kernel map may be locked or 179 * unlocked, depending on whether the request is coming from the kernel map or a 180 * submap. This function allocates a virtual address range directly from the 181 * kernel map instead of the kmem_* layer to avoid recursion on the kernel map 182 * lock and also to avoid triggering allocator recursion in the vmem boundary 183 * tag allocator. 184 */ 185 static void * 186 kmapent_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, 187 int wait) 188 { 189 vm_offset_t addr; 190 int error, locked; 191 192 *pflag = UMA_SLAB_PRIV; 193 194 if (!(locked = vm_map_locked(kernel_map))) 195 vm_map_lock(kernel_map); 196 addr = vm_map_findspace(kernel_map, vm_map_min(kernel_map), bytes); 197 if (addr + bytes < addr || addr + bytes > vm_map_max(kernel_map)) 198 panic("%s: kernel map is exhausted", __func__); 199 error = vm_map_insert(kernel_map, NULL, 0, addr, addr + bytes, 200 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); 201 if (error != KERN_SUCCESS) 202 panic("%s: vm_map_insert() failed: %d", __func__, error); 203 if (!locked) 204 vm_map_unlock(kernel_map); 205 error = kmem_back_domain(domain, kernel_object, addr, bytes, M_NOWAIT | 206 M_USE_RESERVE | (wait & M_ZERO)); 207 if (error == KERN_SUCCESS) { 208 return ((void *)addr); 209 } else { 210 if (!locked) 211 vm_map_lock(kernel_map); 212 vm_map_delete(kernel_map, addr, bytes); 213 if (!locked) 214 vm_map_unlock(kernel_map); 215 return (NULL); 216 } 217 } 218 219 static void 220 kmapent_free(void *item, vm_size_t size, uint8_t pflag) 221 { 222 vm_offset_t addr; 223 int error __diagused; 224 225 if ((pflag & UMA_SLAB_PRIV) == 0) 226 /* XXX leaked */ 227 return; 228 229 addr = (vm_offset_t)item; 230 kmem_unback(kernel_object, addr, size); 231 error = vm_map_remove(kernel_map, addr, addr + size); 232 KASSERT(error == KERN_SUCCESS, 233 ("%s: vm_map_remove failed: %d", __func__, error)); 234 } 235 236 /* 237 * The worst-case upper bound on the number of kernel map entries that may be 238 * created before the zone must be replenished in _vm_map_unlock(). 239 */ 240 #define KMAPENT_RESERVE 1 241 242 #endif /* !UMD_MD_SMALL_ALLOC */ 243 244 /* 245 * vm_map_startup: 246 * 247 * Initialize the vm_map module. Must be called before any other vm_map 248 * routines. 249 * 250 * User map and entry structures are allocated from the general purpose 251 * memory pool. Kernel maps are statically defined. Kernel map entries 252 * require special handling to avoid recursion; see the comments above 253 * kmapent_alloc() and in vm_map_entry_create(). 254 */ 255 void 256 vm_map_startup(void) 257 { 258 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 259 260 /* 261 * Disable the use of per-CPU buckets: map entry allocation is 262 * serialized by the kernel map lock. 
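 *
 * For illustration (a sketch of the typical call path, not an exact trace),
 * allocations from this zone are made below vm_map_entry_create() with the
 * corresponding system map locked:
 *
 *	vm_map_lock(kernel_map);
 *	...
 *	new_entry = vm_map_entry_create(kernel_map);	// uma_zalloc(kmapentzone)
 *	...
 *	vm_map_unlock(kernel_map);	// may uma_prealloc() to refill the reserve
 *
 * so a per-CPU cache would not be filled or drained concurrently and would
 * only pin scarce reserved entries.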
263 */ 264 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 265 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 266 UMA_ZONE_VM | UMA_ZONE_NOBUCKET); 267 #ifndef UMA_USE_DMAP 268 /* Reserve an extra map entry for use when replenishing the reserve. */ 269 uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1); 270 uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1); 271 uma_zone_set_allocf(kmapentzone, kmapent_alloc); 272 uma_zone_set_freef(kmapentzone, kmapent_free); 273 #endif 274 275 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 276 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 277 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 278 #ifdef INVARIANTS 279 vmspace_zdtor, 280 #else 281 NULL, 282 #endif 283 vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 284 } 285 286 static int 287 vmspace_zinit(void *mem, int size, int flags) 288 { 289 struct vmspace *vm; 290 vm_map_t map; 291 292 vm = (struct vmspace *)mem; 293 map = &vm->vm_map; 294 295 memset(map, 0, sizeof(*map)); 296 mtx_init(&map->system_mtx, "vm map (system)", NULL, 297 MTX_DEF | MTX_DUPOK); 298 sx_init(&map->lock, "vm map (user)"); 299 PMAP_LOCK_INIT(vmspace_pmap(vm)); 300 return (0); 301 } 302 303 #ifdef INVARIANTS 304 static void 305 vmspace_zdtor(void *mem, int size, void *arg) 306 { 307 struct vmspace *vm; 308 309 vm = (struct vmspace *)mem; 310 KASSERT(vm->vm_map.nentries == 0, 311 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); 312 KASSERT(vm->vm_map.size == 0, 313 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); 314 } 315 #endif /* INVARIANTS */ 316 317 /* 318 * Allocate a vmspace structure, including a vm_map and pmap, 319 * and initialize those structures. The refcnt is set to 1. 320 */ 321 struct vmspace * 322 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) 323 { 324 struct vmspace *vm; 325 326 vm = uma_zalloc(vmspace_zone, M_WAITOK); 327 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); 328 if (!pinit(vmspace_pmap(vm))) { 329 uma_zfree(vmspace_zone, vm); 330 return (NULL); 331 } 332 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 333 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); 334 refcount_init(&vm->vm_refcnt, 1); 335 vm->vm_shm = NULL; 336 vm->vm_swrss = 0; 337 vm->vm_tsize = 0; 338 vm->vm_dsize = 0; 339 vm->vm_ssize = 0; 340 vm->vm_taddr = 0; 341 vm->vm_daddr = 0; 342 vm->vm_maxsaddr = 0; 343 return (vm); 344 } 345 346 #ifdef RACCT 347 static void 348 vmspace_container_reset(struct proc *p) 349 { 350 351 PROC_LOCK(p); 352 racct_set(p, RACCT_DATA, 0); 353 racct_set(p, RACCT_STACK, 0); 354 racct_set(p, RACCT_RSS, 0); 355 racct_set(p, RACCT_MEMLOCK, 0); 356 racct_set(p, RACCT_VMEM, 0); 357 PROC_UNLOCK(p); 358 } 359 #endif 360 361 static inline void 362 vmspace_dofree(struct vmspace *vm) 363 { 364 365 CTR1(KTR_VM, "vmspace_free: %p", vm); 366 367 /* 368 * Make sure any SysV shm is freed, it might not have been in 369 * exit1(). 370 */ 371 shmexit(vm); 372 373 /* 374 * Lock the map, to wait out all other references to it. 375 * Delete all of the mappings and pages they hold, then call 376 * the pmap module to reclaim anything left. 
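 *
 * For illustration, the life cycle from a caller's point of view (a hedged
 * sketch; the vmspace0 bootstrap and most error handling are omitted, and
 * sv_minuser/sv_maxuser stand in for whatever limits the caller uses):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(sv_minuser, sv_maxuser, pmap_pinit); // refcnt == 1
 *	if (vm == NULL)
 *		return (ENOMEM);	// pmap_pinit() failed
 *	// ... install as p->p_vmspace, populate &vm->vm_map ...
 *	vmspace_free(vm);	// the final release ends up here
 *
 * The order below also matters: every mapping must be removed before
 * pmap_release(), which expects the pmap to be empty.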
377 */ 378 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), 379 vm_map_max(&vm->vm_map)); 380 381 pmap_release(vmspace_pmap(vm)); 382 vm->vm_map.pmap = NULL; 383 uma_zfree(vmspace_zone, vm); 384 } 385 386 void 387 vmspace_free(struct vmspace *vm) 388 { 389 390 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 391 "vmspace_free() called"); 392 393 if (refcount_release(&vm->vm_refcnt)) 394 vmspace_dofree(vm); 395 } 396 397 void 398 vmspace_exitfree(struct proc *p) 399 { 400 struct vmspace *vm; 401 402 PROC_VMSPACE_LOCK(p); 403 vm = p->p_vmspace; 404 p->p_vmspace = NULL; 405 PROC_VMSPACE_UNLOCK(p); 406 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 407 vmspace_free(vm); 408 } 409 410 void 411 vmspace_exit(struct thread *td) 412 { 413 struct vmspace *vm; 414 struct proc *p; 415 bool released; 416 417 p = td->td_proc; 418 vm = p->p_vmspace; 419 420 /* 421 * Prepare to release the vmspace reference. The thread that releases 422 * the last reference is responsible for tearing down the vmspace. 423 * However, threads not releasing the final reference must switch to the 424 * kernel's vmspace0 before the decrement so that the subsequent pmap 425 * deactivation does not modify a freed vmspace. 426 */ 427 refcount_acquire(&vmspace0.vm_refcnt); 428 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { 429 if (p->p_vmspace != &vmspace0) { 430 PROC_VMSPACE_LOCK(p); 431 p->p_vmspace = &vmspace0; 432 PROC_VMSPACE_UNLOCK(p); 433 pmap_activate(td); 434 } 435 released = refcount_release(&vm->vm_refcnt); 436 } 437 if (released) { 438 /* 439 * pmap_remove_pages() expects the pmap to be active, so switch 440 * back first if necessary. 441 */ 442 if (p->p_vmspace != vm) { 443 PROC_VMSPACE_LOCK(p); 444 p->p_vmspace = vm; 445 PROC_VMSPACE_UNLOCK(p); 446 pmap_activate(td); 447 } 448 pmap_remove_pages(vmspace_pmap(vm)); 449 PROC_VMSPACE_LOCK(p); 450 p->p_vmspace = &vmspace0; 451 PROC_VMSPACE_UNLOCK(p); 452 pmap_activate(td); 453 vmspace_dofree(vm); 454 } 455 #ifdef RACCT 456 if (racct_enable) 457 vmspace_container_reset(p); 458 #endif 459 } 460 461 /* Acquire reference to vmspace owned by another process. */ 462 463 struct vmspace * 464 vmspace_acquire_ref(struct proc *p) 465 { 466 struct vmspace *vm; 467 468 PROC_VMSPACE_LOCK(p); 469 vm = p->p_vmspace; 470 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) { 471 PROC_VMSPACE_UNLOCK(p); 472 return (NULL); 473 } 474 if (vm != p->p_vmspace) { 475 PROC_VMSPACE_UNLOCK(p); 476 vmspace_free(vm); 477 return (NULL); 478 } 479 PROC_VMSPACE_UNLOCK(p); 480 return (vm); 481 } 482 483 /* 484 * Switch between vmspaces in an AIO kernel process. 485 * 486 * The new vmspace is either the vmspace of a user process obtained 487 * from an active AIO request or the initial vmspace of the AIO kernel 488 * process (when it is idling). Because user processes will block to 489 * drain any active AIO requests before proceeding in exit() or 490 * execve(), the reference count for vmspaces from AIO requests can 491 * never be 0. Similarly, AIO kernel processes hold an extra 492 * reference on their initial vmspace for the life of the process. As 493 * a result, the 'newvm' vmspace always has a non-zero reference 494 * count. This permits an additional reference on 'newvm' to be 495 * acquired via a simple atomic increment rather than the loop in 496 * vmspace_acquire_ref() above. 497 */ 498 void 499 vmspace_switch_aio(struct vmspace *newvm) 500 { 501 struct vmspace *oldvm; 502 503 /* XXX: Need some way to assert that this is an aio daemon. 
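 *
 * Only the AIO daemons may rely on this shortcut.  For illustration, code
 * that needs a reference to some other process's vmspace goes through
 * vmspace_acquire_ref() and must handle failure (a hedged sketch, not part
 * of this file; the error value is only an example):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);		// p has no vmspace or it is going away
 *	// ... inspect vm->vm_map under vm_map_lock_read() ...
 *	vmspace_free(vm);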
*/ 504 505 KASSERT(refcount_load(&newvm->vm_refcnt) > 0, 506 ("vmspace_switch_aio: newvm unreferenced")); 507 508 oldvm = curproc->p_vmspace; 509 if (oldvm == newvm) 510 return; 511 512 /* 513 * Point to the new address space and refer to it. 514 */ 515 curproc->p_vmspace = newvm; 516 refcount_acquire(&newvm->vm_refcnt); 517 518 /* Activate the new mapping. */ 519 pmap_activate(curthread); 520 521 vmspace_free(oldvm); 522 } 523 524 void 525 _vm_map_lock(vm_map_t map, const char *file, int line) 526 { 527 528 if (map->system_map) 529 mtx_lock_flags_(&map->system_mtx, 0, file, line); 530 else 531 sx_xlock_(&map->lock, file, line); 532 map->timestamp++; 533 } 534 535 void 536 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add) 537 { 538 vm_object_t object; 539 struct vnode *vp; 540 bool vp_held; 541 542 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0) 543 return; 544 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 545 ("Submap with execs")); 546 object = entry->object.vm_object; 547 KASSERT(object != NULL, ("No object for text, entry %p", entry)); 548 if ((object->flags & OBJ_ANON) != 0) 549 object = object->handle; 550 else 551 KASSERT(object->backing_object == NULL, 552 ("non-anon object %p shadows", object)); 553 KASSERT(object != NULL, ("No content object for text, entry %p obj %p", 554 entry, entry->object.vm_object)); 555 556 /* 557 * Mostly, we do not lock the backing object. It is 558 * referenced by the entry we are processing, so it cannot go 559 * away. 560 */ 561 vm_pager_getvp(object, &vp, &vp_held); 562 if (vp != NULL) { 563 if (add) { 564 VOP_SET_TEXT_CHECKED(vp); 565 } else { 566 vn_lock(vp, LK_SHARED | LK_RETRY); 567 VOP_UNSET_TEXT_CHECKED(vp); 568 VOP_UNLOCK(vp); 569 } 570 if (vp_held) 571 vdrop(vp); 572 } 573 } 574 575 /* 576 * Use a different name for this vm_map_entry field when its use 577 * is not consistent with its use as part of an ordered search tree. 578 */ 579 #define defer_next right 580 581 static void 582 vm_map_process_deferred(void) 583 { 584 struct thread *td; 585 vm_map_entry_t entry, next; 586 vm_object_t object; 587 588 td = curthread; 589 entry = td->td_map_def_user; 590 td->td_map_def_user = NULL; 591 while (entry != NULL) { 592 next = entry->defer_next; 593 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | 594 MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT | 595 MAP_ENTRY_VN_EXEC)); 596 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) { 597 /* 598 * Decrement the object's writemappings and 599 * possibly the vnode's v_writecount.
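 *
 * Note that this work runs after the map lock has been dropped by
 * _vm_map_unlock(): deleted entries are queued on curthread->td_map_def_user
 * by the delete path while the map is still locked, precisely so that
 * potentially sleeping work, such as the vnode operations below and the
 * object release in vm_map_entry_deallocate(), happens outside the map
 * lock's critical section.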
600 */ 601 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 602 ("Submap with writecount")); 603 object = entry->object.vm_object; 604 KASSERT(object != NULL, ("No object for writecount")); 605 vm_pager_release_writecount(object, entry->start, 606 entry->end); 607 } 608 vm_map_entry_set_vnode_text(entry, false); 609 vm_map_entry_deallocate(entry, FALSE); 610 entry = next; 611 } 612 } 613 614 #ifdef INVARIANTS 615 static void 616 _vm_map_assert_locked(vm_map_t map, const char *file, int line) 617 { 618 619 if (map->system_map) 620 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 621 else 622 sx_assert_(&map->lock, SA_XLOCKED, file, line); 623 } 624 625 #define VM_MAP_ASSERT_LOCKED(map) \ 626 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) 627 628 enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL }; 629 #ifdef DIAGNOSTIC 630 static int enable_vmmap_check = VMMAP_CHECK_UNLOCK; 631 #else 632 static int enable_vmmap_check = VMMAP_CHECK_NONE; 633 #endif 634 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN, 635 &enable_vmmap_check, 0, "Enable vm map consistency checking"); 636 637 static void _vm_map_assert_consistent(vm_map_t map, int check); 638 639 #define VM_MAP_ASSERT_CONSISTENT(map) \ 640 _vm_map_assert_consistent(map, VMMAP_CHECK_ALL) 641 #ifdef DIAGNOSTIC 642 #define VM_MAP_UNLOCK_CONSISTENT(map) do { \ 643 if (map->nupdates > map->nentries) { \ 644 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \ 645 map->nupdates = 0; \ 646 } \ 647 } while (0) 648 #else 649 #define VM_MAP_UNLOCK_CONSISTENT(map) 650 #endif 651 #else 652 #define VM_MAP_ASSERT_LOCKED(map) 653 #define VM_MAP_ASSERT_CONSISTENT(map) 654 #define VM_MAP_UNLOCK_CONSISTENT(map) 655 #endif /* INVARIANTS */ 656 657 void 658 _vm_map_unlock(vm_map_t map, const char *file, int line) 659 { 660 661 VM_MAP_UNLOCK_CONSISTENT(map); 662 if (map->system_map) { 663 #ifndef UMA_USE_DMAP 664 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) { 665 uma_prealloc(kmapentzone, 1); 666 map->flags &= ~MAP_REPLENISH; 667 } 668 #endif 669 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 670 } else { 671 sx_xunlock_(&map->lock, file, line); 672 vm_map_process_deferred(); 673 } 674 } 675 676 void 677 _vm_map_lock_read(vm_map_t map, const char *file, int line) 678 { 679 680 if (map->system_map) 681 mtx_lock_flags_(&map->system_mtx, 0, file, line); 682 else 683 sx_slock_(&map->lock, file, line); 684 } 685 686 void 687 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 688 { 689 690 if (map->system_map) { 691 KASSERT((map->flags & MAP_REPLENISH) == 0, 692 ("%s: MAP_REPLENISH leaked", __func__)); 693 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 694 } else { 695 sx_sunlock_(&map->lock, file, line); 696 vm_map_process_deferred(); 697 } 698 } 699 700 int 701 _vm_map_trylock(vm_map_t map, const char *file, int line) 702 { 703 int error; 704 705 error = map->system_map ? 706 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 707 !sx_try_xlock_(&map->lock, file, line); 708 if (error == 0) 709 map->timestamp++; 710 return (error == 0); 711 } 712 713 int 714 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 715 { 716 int error; 717 718 error = map->system_map ? 719 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 720 !sx_try_slock_(&map->lock, file, line); 721 return (error == 0); 722 } 723 724 /* 725 * _vm_map_lock_upgrade: [ internal use only ] 726 * 727 * Tries to upgrade a read (shared) lock on the specified map to a write 728 * (exclusive) lock. 
Returns the value "0" if the upgrade succeeds and a 729 * non-zero value if the upgrade fails. If the upgrade fails, the map is 730 * returned without a read or write lock held. 731 * 732 * Requires that the map be read locked. 733 */ 734 int 735 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 736 { 737 unsigned int last_timestamp; 738 739 if (map->system_map) { 740 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 741 } else { 742 if (!sx_try_upgrade_(&map->lock, file, line)) { 743 last_timestamp = map->timestamp; 744 sx_sunlock_(&map->lock, file, line); 745 vm_map_process_deferred(); 746 /* 747 * If the map's timestamp does not change while the 748 * map is unlocked, then the upgrade succeeds. 749 */ 750 sx_xlock_(&map->lock, file, line); 751 if (last_timestamp != map->timestamp) { 752 sx_xunlock_(&map->lock, file, line); 753 return (1); 754 } 755 } 756 } 757 map->timestamp++; 758 return (0); 759 } 760 761 void 762 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 763 { 764 765 if (map->system_map) { 766 KASSERT((map->flags & MAP_REPLENISH) == 0, 767 ("%s: MAP_REPLENISH leaked", __func__)); 768 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 769 } else { 770 VM_MAP_UNLOCK_CONSISTENT(map); 771 sx_downgrade_(&map->lock, file, line); 772 } 773 } 774 775 /* 776 * vm_map_locked: 777 * 778 * Returns a non-zero value if the caller holds a write (exclusive) lock 779 * on the specified map and the value "0" otherwise. 780 */ 781 int 782 vm_map_locked(vm_map_t map) 783 { 784 785 if (map->system_map) 786 return (mtx_owned(&map->system_mtx)); 787 else 788 return (sx_xlocked(&map->lock)); 789 } 790 791 /* 792 * _vm_map_unlock_and_wait: 793 * 794 * Atomically releases the lock on the specified map and puts the calling 795 * thread to sleep. The calling thread will remain asleep until either 796 * vm_map_wakeup() is performed on the map or the specified timeout is 797 * exceeded. 798 * 799 * WARNING! This function does not perform deferred deallocations of 800 * objects and map entries. Therefore, the calling thread is expected to 801 * reacquire the map lock after reawakening and later perform an ordinary 802 * unlock operation, such as vm_map_unlock(), before completing its 803 * operation on the map. 804 */ 805 int 806 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) 807 { 808 809 VM_MAP_UNLOCK_CONSISTENT(map); 810 mtx_lock(&map_sleep_mtx); 811 if (map->system_map) { 812 KASSERT((map->flags & MAP_REPLENISH) == 0, 813 ("%s: MAP_REPLENISH leaked", __func__)); 814 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 815 } else { 816 sx_xunlock_(&map->lock, file, line); 817 } 818 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 819 timo)); 820 } 821 822 /* 823 * vm_map_wakeup: 824 * 825 * Awaken any threads that have slept on the map using 826 * vm_map_unlock_and_wait(). 827 */ 828 void 829 vm_map_wakeup(vm_map_t map) 830 { 831 832 /* 833 * Acquire and release map_sleep_mtx to prevent a wakeup() 834 * from being performed (and lost) between the map unlock 835 * and the msleep() in _vm_map_unlock_and_wait(). 
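 *
 * For illustration, the waiting side typically follows this hedged sketch
 * (the enclosing retry loop and error handling are elided):
 *
 *	last_timestamp = map->timestamp;
 *	(void)vm_map_unlock_and_wait(map, 0);	// drops the lock and sleeps
 *	vm_map_lock(map);
 *	if (last_timestamp != map->timestamp) {
 *		// the map changed while unlocked; look the entry up again
 *	}
 *
 * Bouncing through map_sleep_mtx here closes the window in which a waiter
 * has dropped the map lock but has not yet called msleep().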
836 */ 837 mtx_lock(&map_sleep_mtx); 838 mtx_unlock(&map_sleep_mtx); 839 wakeup(&map->root); 840 } 841 842 void 843 vm_map_busy(vm_map_t map) 844 { 845 846 VM_MAP_ASSERT_LOCKED(map); 847 map->busy++; 848 } 849 850 void 851 vm_map_unbusy(vm_map_t map) 852 { 853 854 VM_MAP_ASSERT_LOCKED(map); 855 KASSERT(map->busy, ("vm_map_unbusy: not busy")); 856 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { 857 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); 858 wakeup(&map->busy); 859 } 860 } 861 862 void 863 vm_map_wait_busy(vm_map_t map) 864 { 865 866 VM_MAP_ASSERT_LOCKED(map); 867 while (map->busy) { 868 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); 869 if (map->system_map) 870 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); 871 else 872 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); 873 } 874 map->timestamp++; 875 } 876 877 long 878 vmspace_resident_count(struct vmspace *vmspace) 879 { 880 return pmap_resident_count(vmspace_pmap(vmspace)); 881 } 882 883 /* 884 * Initialize an existing vm_map structure 885 * such as that in the vmspace structure. 886 */ 887 static void 888 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 889 { 890 891 map->header.eflags = MAP_ENTRY_HEADER; 892 map->needs_wakeup = FALSE; 893 map->system_map = 0; 894 map->pmap = pmap; 895 map->header.end = min; 896 map->header.start = max; 897 map->flags = 0; 898 map->header.left = map->header.right = &map->header; 899 map->root = NULL; 900 map->timestamp = 0; 901 map->busy = 0; 902 map->anon_loc = 0; 903 #ifdef DIAGNOSTIC 904 map->nupdates = 0; 905 #endif 906 } 907 908 void 909 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 910 { 911 912 _vm_map_init(map, pmap, min, max); 913 mtx_init(&map->system_mtx, "vm map (system)", NULL, 914 MTX_DEF | MTX_DUPOK); 915 sx_init(&map->lock, "vm map (user)"); 916 } 917 918 /* 919 * vm_map_entry_dispose: [ internal use only ] 920 * 921 * Inverse of vm_map_entry_create. 922 */ 923 static void 924 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 925 { 926 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 927 } 928 929 /* 930 * vm_map_entry_create: [ internal use only ] 931 * 932 * Allocates a VM map entry for insertion. 933 * No entry fields are filled in. 934 */ 935 static vm_map_entry_t 936 vm_map_entry_create(vm_map_t map) 937 { 938 vm_map_entry_t new_entry; 939 940 #ifndef UMA_USE_DMAP 941 if (map == kernel_map) { 942 VM_MAP_ASSERT_LOCKED(map); 943 944 /* 945 * A new slab of kernel map entries cannot be allocated at this 946 * point because the kernel map has not yet been updated to 947 * reflect the caller's request. Therefore, we allocate a new 948 * map entry, dipping into the reserve if necessary, and set a 949 * flag indicating that the reserve must be replenished before 950 * the map is unlocked. 951 */ 952 new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_NOVM); 953 if (new_entry == NULL) { 954 new_entry = uma_zalloc(kmapentzone, 955 M_NOWAIT | M_NOVM | M_USE_RESERVE); 956 kernel_map->flags |= MAP_REPLENISH; 957 } 958 } else 959 #endif 960 if (map->system_map) { 961 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 962 } else { 963 new_entry = uma_zalloc(mapentzone, M_WAITOK); 964 } 965 KASSERT(new_entry != NULL, 966 ("vm_map_entry_create: kernel resources exhausted")); 967 return (new_entry); 968 } 969 970 /* 971 * vm_map_entry_set_behavior: 972 * 973 * Set the expected access behavior, either normal, random, or 974 * sequential. 
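 *
 * The behavior normally comes from an madvise(2) request; as a summary
 * (see vm_map_madvise() for the authoritative mapping):
 *
 *	MADV_NORMAL     -> MAP_ENTRY_BEHAV_NORMAL
 *	MADV_RANDOM     -> MAP_ENTRY_BEHAV_RANDOM
 *	MADV_SEQUENTIAL -> MAP_ENTRY_BEHAV_SEQUENTIAL
 *
 * The fault handler consults the entry's behavior when sizing read-ahead and
 * when deciding whether to throw away pages behind a sequential scan.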
975 */ 976 static inline void 977 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 978 { 979 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 980 (behavior & MAP_ENTRY_BEHAV_MASK); 981 } 982 983 /* 984 * vm_map_entry_max_free_{left,right}: 985 * 986 * Compute the size of the largest free gap between two entries, 987 * one the root of a tree and the other the ancestor of that root 988 * that is the least or greatest ancestor found on the search path. 989 */ 990 static inline vm_size_t 991 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor) 992 { 993 994 return (root->left != left_ancestor ? 995 root->left->max_free : root->start - left_ancestor->end); 996 } 997 998 static inline vm_size_t 999 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor) 1000 { 1001 1002 return (root->right != right_ancestor ? 1003 root->right->max_free : right_ancestor->start - root->end); 1004 } 1005 1006 /* 1007 * vm_map_entry_{pred,succ}: 1008 * 1009 * Find the {predecessor, successor} of the entry by taking one step 1010 * in the appropriate direction and backtracking as much as necessary. 1011 * vm_map_entry_succ is defined in vm_map.h. 1012 */ 1013 static inline vm_map_entry_t 1014 vm_map_entry_pred(vm_map_entry_t entry) 1015 { 1016 vm_map_entry_t prior; 1017 1018 prior = entry->left; 1019 if (prior->right->start < entry->start) { 1020 do 1021 prior = prior->right; 1022 while (prior->right != entry); 1023 } 1024 return (prior); 1025 } 1026 1027 static inline vm_size_t 1028 vm_size_max(vm_size_t a, vm_size_t b) 1029 { 1030 1031 return (a > b ? a : b); 1032 } 1033 1034 #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do { \ 1035 vm_map_entry_t z; \ 1036 vm_size_t max_free; \ 1037 \ 1038 /* \ 1039 * Infer root->right->max_free == root->max_free when \ 1040 * y->max_free < root->max_free || root->max_free == 0. \ 1041 * Otherwise, look right to find it. \ 1042 */ \ 1043 y = root->left; \ 1044 max_free = root->max_free; \ 1045 KASSERT(max_free == vm_size_max( \ 1046 vm_map_entry_max_free_left(root, llist), \ 1047 vm_map_entry_max_free_right(root, rlist)), \ 1048 ("%s: max_free invariant fails", __func__)); \ 1049 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \ 1050 max_free = vm_map_entry_max_free_right(root, rlist); \ 1051 if (y != llist && (test)) { \ 1052 /* Rotate right and make y root. */ \ 1053 z = y->right; \ 1054 if (z != root) { \ 1055 root->left = z; \ 1056 y->right = root; \ 1057 if (max_free < y->max_free) \ 1058 root->max_free = max_free = \ 1059 vm_size_max(max_free, z->max_free); \ 1060 } else if (max_free < y->max_free) \ 1061 root->max_free = max_free = \ 1062 vm_size_max(max_free, root->start - y->end);\ 1063 root = y; \ 1064 y = root->left; \ 1065 } \ 1066 /* Copy right->max_free. Put root on rlist. */ \ 1067 root->max_free = max_free; \ 1068 KASSERT(max_free == vm_map_entry_max_free_right(root, rlist), \ 1069 ("%s: max_free not copied from right", __func__)); \ 1070 root->left = rlist; \ 1071 rlist = root; \ 1072 root = y != llist ? y : NULL; \ 1073 } while (0) 1074 1075 #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do { \ 1076 vm_map_entry_t z; \ 1077 vm_size_t max_free; \ 1078 \ 1079 /* \ 1080 * Infer root->left->max_free == root->max_free when \ 1081 * y->max_free < root->max_free || root->max_free == 0. \ 1082 * Otherwise, look left to find it. 
\ 1083 */ \ 1084 y = root->right; \ 1085 max_free = root->max_free; \ 1086 KASSERT(max_free == vm_size_max( \ 1087 vm_map_entry_max_free_left(root, llist), \ 1088 vm_map_entry_max_free_right(root, rlist)), \ 1089 ("%s: max_free invariant fails", __func__)); \ 1090 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \ 1091 max_free = vm_map_entry_max_free_left(root, llist); \ 1092 if (y != rlist && (test)) { \ 1093 /* Rotate left and make y root. */ \ 1094 z = y->left; \ 1095 if (z != root) { \ 1096 root->right = z; \ 1097 y->left = root; \ 1098 if (max_free < y->max_free) \ 1099 root->max_free = max_free = \ 1100 vm_size_max(max_free, z->max_free); \ 1101 } else if (max_free < y->max_free) \ 1102 root->max_free = max_free = \ 1103 vm_size_max(max_free, y->start - root->end);\ 1104 root = y; \ 1105 y = root->right; \ 1106 } \ 1107 /* Copy left->max_free. Put root on llist. */ \ 1108 root->max_free = max_free; \ 1109 KASSERT(max_free == vm_map_entry_max_free_left(root, llist), \ 1110 ("%s: max_free not copied from left", __func__)); \ 1111 root->right = llist; \ 1112 llist = root; \ 1113 root = y != rlist ? y : NULL; \ 1114 } while (0) 1115 1116 /* 1117 * Walk down the tree until we find addr or a gap where addr would go, breaking 1118 * off left and right subtrees of nodes less than, or greater than addr. Treat 1119 * subtrees with root->max_free < length as empty trees. llist and rlist are 1120 * the two sides in reverse order (bottom-up), with llist linked by the right 1121 * pointer and rlist linked by the left pointer in the vm_map_entry, and both 1122 * lists terminated by &map->header. This function, and the subsequent call to 1123 * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address 1124 * values in &map->header. 1125 */ 1126 static __always_inline vm_map_entry_t 1127 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length, 1128 vm_map_entry_t *llist, vm_map_entry_t *rlist) 1129 { 1130 vm_map_entry_t left, right, root, y; 1131 1132 left = right = &map->header; 1133 root = map->root; 1134 while (root != NULL && root->max_free >= length) { 1135 KASSERT(left->end <= root->start && 1136 root->end <= right->start, 1137 ("%s: root not within tree bounds", __func__)); 1138 if (addr < root->start) { 1139 SPLAY_LEFT_STEP(root, y, left, right, 1140 y->max_free >= length && addr < y->start); 1141 } else if (addr >= root->end) { 1142 SPLAY_RIGHT_STEP(root, y, left, right, 1143 y->max_free >= length && addr >= y->end); 1144 } else 1145 break; 1146 } 1147 *llist = left; 1148 *rlist = right; 1149 return (root); 1150 } 1151 1152 static __always_inline void 1153 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist) 1154 { 1155 vm_map_entry_t hi, right, y; 1156 1157 right = *rlist; 1158 hi = root->right == right ? NULL : root->right; 1159 if (hi == NULL) 1160 return; 1161 do 1162 SPLAY_LEFT_STEP(hi, y, root, right, true); 1163 while (hi != NULL); 1164 *rlist = right; 1165 } 1166 1167 static __always_inline void 1168 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist) 1169 { 1170 vm_map_entry_t left, lo, y; 1171 1172 left = *llist; 1173 lo = root->left == left ? 
NULL : root->left; 1174 if (lo == NULL) 1175 return; 1176 do 1177 SPLAY_RIGHT_STEP(lo, y, left, root, true); 1178 while (lo != NULL); 1179 *llist = left; 1180 } 1181 1182 static inline void 1183 vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b) 1184 { 1185 vm_map_entry_t tmp; 1186 1187 tmp = *b; 1188 *b = *a; 1189 *a = tmp; 1190 } 1191 1192 /* 1193 * Walk back up the two spines, flip the pointers and set max_free. The 1194 * subtrees of the root go at the bottom of llist and rlist. 1195 */ 1196 static vm_size_t 1197 vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root, 1198 vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist) 1199 { 1200 do { 1201 /* 1202 * The max_free values of the children of llist are in 1203 * llist->max_free and max_free. Update with the 1204 * max value. 1205 */ 1206 llist->max_free = max_free = 1207 vm_size_max(llist->max_free, max_free); 1208 vm_map_entry_swap(&llist->right, &tail); 1209 vm_map_entry_swap(&tail, &llist); 1210 } while (llist != header); 1211 root->left = tail; 1212 return (max_free); 1213 } 1214 1215 /* 1216 * When llist is known to be the predecessor of root. 1217 */ 1218 static inline vm_size_t 1219 vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root, 1220 vm_map_entry_t llist) 1221 { 1222 vm_size_t max_free; 1223 1224 max_free = root->start - llist->end; 1225 if (llist != header) { 1226 max_free = vm_map_splay_merge_left_walk(header, root, 1227 root, max_free, llist); 1228 } else { 1229 root->left = header; 1230 header->right = root; 1231 } 1232 return (max_free); 1233 } 1234 1235 /* 1236 * When llist may or may not be the predecessor of root. 1237 */ 1238 static inline vm_size_t 1239 vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root, 1240 vm_map_entry_t llist) 1241 { 1242 vm_size_t max_free; 1243 1244 max_free = vm_map_entry_max_free_left(root, llist); 1245 if (llist != header) { 1246 max_free = vm_map_splay_merge_left_walk(header, root, 1247 root->left == llist ? root : root->left, 1248 max_free, llist); 1249 } 1250 return (max_free); 1251 } 1252 1253 static vm_size_t 1254 vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root, 1255 vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist) 1256 { 1257 do { 1258 /* 1259 * The max_free values of the children of rlist are in 1260 * rlist->max_free and max_free. Update with the 1261 * max value. 1262 */ 1263 rlist->max_free = max_free = 1264 vm_size_max(rlist->max_free, max_free); 1265 vm_map_entry_swap(&rlist->left, &tail); 1266 vm_map_entry_swap(&tail, &rlist); 1267 } while (rlist != header); 1268 root->right = tail; 1269 return (max_free); 1270 } 1271 1272 /* 1273 * When rlist is known to be the successor of root. 1274 */ 1275 static inline vm_size_t 1276 vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root, 1277 vm_map_entry_t rlist) 1278 { 1279 vm_size_t max_free; 1280 1281 max_free = rlist->start - root->end; 1282 if (rlist != header) { 1283 max_free = vm_map_splay_merge_right_walk(header, root, 1284 root, max_free, rlist); 1285 } else { 1286 root->right = header; 1287 header->left = root; 1288 } 1289 return (max_free); 1290 } 1291 1292 /* 1293 * When rlist may or may not be the successor of root.
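 *
 * To make the max_free bookkeeping concrete, consider a map with three
 * entries (addresses illustrative):
 *
 *	[0x1000,0x2000)   [0x3000,0x6000)   [0x8000,0x9000)
 *
 * The interior gaps are 0x1000 and 0x2000 bytes.  With the middle entry at
 * the root, its max_free is the largest gap adjacent to any entry in its
 * subtree, here 0x2000 (ignoring the gaps against the ends of the address
 * space).  The walk functions above and the merge functions below rebuild
 * exactly this value bottom-up while the splay spines are folded back into
 * a tree.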
1294 */ 1295 static inline vm_size_t 1296 vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root, 1297 vm_map_entry_t rlist) 1298 { 1299 vm_size_t max_free; 1300 1301 max_free = vm_map_entry_max_free_right(root, rlist); 1302 if (rlist != header) { 1303 max_free = vm_map_splay_merge_right_walk(header, root, 1304 root->right == rlist ? root : root->right, 1305 max_free, rlist); 1306 } 1307 return (max_free); 1308 } 1309 1310 /* 1311 * vm_map_splay: 1312 * 1313 * The Sleator and Tarjan top-down splay algorithm with the 1314 * following variation. Max_free must be computed bottom-up, so 1315 * on the downward pass, maintain the left and right spines in 1316 * reverse order. Then, make a second pass up each side to fix 1317 * the pointers and compute max_free. The time bound is O(log n) 1318 * amortized. 1319 * 1320 * The tree is threaded, which means that there are no null pointers. 1321 * When a node has no left child, its left pointer points to its 1322 * predecessor, which the last ancestor on the search path from the root 1323 * where the search branched right. Likewise, when a node has no right 1324 * child, its right pointer points to its successor. The map header node 1325 * is the predecessor of the first map entry, and the successor of the 1326 * last. 1327 * 1328 * The new root is the vm_map_entry containing "addr", or else an 1329 * adjacent entry (lower if possible) if addr is not in the tree. 1330 * 1331 * The map must be locked, and leaves it so. 1332 * 1333 * Returns: the new root. 1334 */ 1335 static vm_map_entry_t 1336 vm_map_splay(vm_map_t map, vm_offset_t addr) 1337 { 1338 vm_map_entry_t header, llist, rlist, root; 1339 vm_size_t max_free_left, max_free_right; 1340 1341 header = &map->header; 1342 root = vm_map_splay_split(map, addr, 0, &llist, &rlist); 1343 if (root != NULL) { 1344 max_free_left = vm_map_splay_merge_left(header, root, llist); 1345 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1346 } else if (llist != header) { 1347 /* 1348 * Recover the greatest node in the left 1349 * subtree and make it the root. 1350 */ 1351 root = llist; 1352 llist = root->right; 1353 max_free_left = vm_map_splay_merge_left(header, root, llist); 1354 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1355 } else if (rlist != header) { 1356 /* 1357 * Recover the least node in the right 1358 * subtree and make it the root. 1359 */ 1360 root = rlist; 1361 rlist = root->left; 1362 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1363 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1364 } else { 1365 /* There is no root. */ 1366 return (NULL); 1367 } 1368 root->max_free = vm_size_max(max_free_left, max_free_right); 1369 map->root = root; 1370 VM_MAP_ASSERT_CONSISTENT(map); 1371 return (root); 1372 } 1373 1374 /* 1375 * vm_map_entry_{un,}link: 1376 * 1377 * Insert/remove entries from maps. On linking, if new entry clips 1378 * existing entry, trim existing entry to avoid overlap, and manage 1379 * offsets. On unlinking, merge disappearing entry with neighbor, if 1380 * called for, and manage offsets. Callers should not modify fields in 1381 * entries already mapped. 
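 *
 * A concrete clipping example: suppose an existing entry covers
 * [0x2000,0x6000) with offset 0 and a new entry is linked for
 * [0x2000,0x4000).  The existing entry is trimmed to [0x4000,0x6000) and its
 * offset advanced to 0x2000, so every remaining address still maps the same
 * object page; the new entry then owns [0x2000,0x4000).  Stack gap entries
 * skip the offset adjustment, as they carry no backing object.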
1382 */ 1383 static void 1384 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) 1385 { 1386 vm_map_entry_t header, llist, rlist, root; 1387 vm_size_t max_free_left, max_free_right; 1388 1389 CTR3(KTR_VM, 1390 "vm_map_entry_link: map %p, nentries %d, entry %p", map, 1391 map->nentries, entry); 1392 VM_MAP_ASSERT_LOCKED(map); 1393 map->nentries++; 1394 header = &map->header; 1395 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1396 if (root == NULL) { 1397 /* 1398 * The new entry does not overlap any existing entry in the 1399 * map, so it becomes the new root of the map tree. 1400 */ 1401 max_free_left = vm_map_splay_merge_pred(header, entry, llist); 1402 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); 1403 } else if (entry->start == root->start) { 1404 /* 1405 * The new entry is a clone of root, with only the end field 1406 * changed. The root entry will be shrunk to abut the new 1407 * entry, and will be the right child of the new root entry in 1408 * the modified map. 1409 */ 1410 KASSERT(entry->end < root->end, 1411 ("%s: clip_start not within entry", __func__)); 1412 vm_map_splay_findprev(root, &llist); 1413 if ((root->eflags & MAP_ENTRY_STACK_GAP) == 0) 1414 root->offset += entry->end - root->start; 1415 root->start = entry->end; 1416 max_free_left = vm_map_splay_merge_pred(header, entry, llist); 1417 max_free_right = root->max_free = vm_size_max( 1418 vm_map_splay_merge_pred(entry, root, entry), 1419 vm_map_splay_merge_right(header, root, rlist)); 1420 } else { 1421 /* 1422 * The new entry is a clone of root, with only the start field 1423 * changed. The root entry will be shrunk to abut the new 1424 * entry, and will be the left child of the new root entry in 1425 * the modified map. 1426 */ 1427 KASSERT(entry->end == root->end, 1428 ("%s: clip_start not within entry", __func__)); 1429 vm_map_splay_findnext(root, &rlist); 1430 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) 1431 entry->offset += entry->start - root->start; 1432 root->end = entry->start; 1433 max_free_left = root->max_free = vm_size_max( 1434 vm_map_splay_merge_left(header, root, llist), 1435 vm_map_splay_merge_succ(entry, root, entry)); 1436 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); 1437 } 1438 entry->max_free = vm_size_max(max_free_left, max_free_right); 1439 map->root = entry; 1440 VM_MAP_ASSERT_CONSISTENT(map); 1441 } 1442 1443 enum unlink_merge_type { 1444 UNLINK_MERGE_NONE, 1445 UNLINK_MERGE_NEXT 1446 }; 1447 1448 static void 1449 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, 1450 enum unlink_merge_type op) 1451 { 1452 vm_map_entry_t header, llist, rlist, root; 1453 vm_size_t max_free_left, max_free_right; 1454 1455 VM_MAP_ASSERT_LOCKED(map); 1456 header = &map->header; 1457 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1458 KASSERT(root != NULL, 1459 ("vm_map_entry_unlink: unlink object not mapped")); 1460 1461 vm_map_splay_findprev(root, &llist); 1462 vm_map_splay_findnext(root, &rlist); 1463 if (op == UNLINK_MERGE_NEXT) { 1464 rlist->start = root->start; 1465 MPASS((rlist->eflags & MAP_ENTRY_STACK_GAP) == 0); 1466 rlist->offset = root->offset; 1467 } 1468 if (llist != header) { 1469 root = llist; 1470 llist = root->right; 1471 max_free_left = vm_map_splay_merge_left(header, root, llist); 1472 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1473 } else if (rlist != header) { 1474 root = rlist; 1475 rlist = root->left; 1476 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1477 max_free_right = 
vm_map_splay_merge_right(header, root, rlist); 1478 } else { 1479 header->left = header->right = header; 1480 root = NULL; 1481 } 1482 if (root != NULL) 1483 root->max_free = vm_size_max(max_free_left, max_free_right); 1484 map->root = root; 1485 VM_MAP_ASSERT_CONSISTENT(map); 1486 map->nentries--; 1487 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 1488 map->nentries, entry); 1489 } 1490 1491 /* 1492 * vm_map_entry_resize: 1493 * 1494 * Resize a vm_map_entry, recompute the amount of free space that 1495 * follows it and propagate that value up the tree. 1496 * 1497 * The map must be locked, and leaves it so. 1498 */ 1499 static void 1500 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount) 1501 { 1502 vm_map_entry_t header, llist, rlist, root; 1503 1504 VM_MAP_ASSERT_LOCKED(map); 1505 header = &map->header; 1506 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); 1507 KASSERT(root != NULL, ("%s: resize object not mapped", __func__)); 1508 vm_map_splay_findnext(root, &rlist); 1509 entry->end += grow_amount; 1510 root->max_free = vm_size_max( 1511 vm_map_splay_merge_left(header, root, llist), 1512 vm_map_splay_merge_succ(header, root, rlist)); 1513 map->root = root; 1514 VM_MAP_ASSERT_CONSISTENT(map); 1515 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", 1516 __func__, map, map->nentries, entry); 1517 } 1518 1519 /* 1520 * vm_map_lookup_entry: [ internal use only ] 1521 * 1522 * Finds the map entry containing (or 1523 * immediately preceding) the specified address 1524 * in the given map; the entry is returned 1525 * in the "entry" parameter. The boolean 1526 * result indicates whether the address is 1527 * actually contained in the map. 1528 */ 1529 boolean_t 1530 vm_map_lookup_entry( 1531 vm_map_t map, 1532 vm_offset_t address, 1533 vm_map_entry_t *entry) /* OUT */ 1534 { 1535 vm_map_entry_t cur, header, lbound, ubound; 1536 boolean_t locked; 1537 1538 /* 1539 * If the map is empty, then the map entry immediately preceding 1540 * "address" is the map's header. 1541 */ 1542 header = &map->header; 1543 cur = map->root; 1544 if (cur == NULL) { 1545 *entry = header; 1546 return (FALSE); 1547 } 1548 if (address >= cur->start && cur->end > address) { 1549 *entry = cur; 1550 return (TRUE); 1551 } 1552 if ((locked = vm_map_locked(map)) || 1553 sx_try_upgrade(&map->lock)) { 1554 /* 1555 * Splay requires a write lock on the map. However, it only 1556 * restructures the binary search tree; it does not otherwise 1557 * change the map. Thus, the map's timestamp need not change 1558 * on a temporary upgrade. 1559 */ 1560 cur = vm_map_splay(map, address); 1561 if (!locked) { 1562 VM_MAP_UNLOCK_CONSISTENT(map); 1563 sx_downgrade(&map->lock); 1564 } 1565 1566 /* 1567 * If "address" is contained within a map entry, the new root 1568 * is that map entry. Otherwise, the new root is a map entry 1569 * immediately before or after "address". 1570 */ 1571 if (address < cur->start) { 1572 *entry = header; 1573 return (FALSE); 1574 } 1575 *entry = cur; 1576 return (address < cur->end); 1577 } 1578 /* 1579 * Since the map is only locked for read access, perform a 1580 * standard binary search tree lookup for "address". 
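 *
 * For illustration, a typical read-side caller looks like this hedged
 * sketch (real callers also deal with submaps, clipping, and wiring):
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// entry->start <= addr < entry->end
 *	} else {
 *		// entry is the closest preceding entry, possibly &map->header
 *	}
 *	vm_map_unlock_read(map);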
1581 */ 1582 lbound = ubound = header; 1583 for (;;) { 1584 if (address < cur->start) { 1585 ubound = cur; 1586 cur = cur->left; 1587 if (cur == lbound) 1588 break; 1589 } else if (cur->end <= address) { 1590 lbound = cur; 1591 cur = cur->right; 1592 if (cur == ubound) 1593 break; 1594 } else { 1595 *entry = cur; 1596 return (TRUE); 1597 } 1598 } 1599 *entry = lbound; 1600 return (FALSE); 1601 } 1602 1603 /* 1604 * vm_map_insert1() is identical to vm_map_insert() except that it 1605 * returns the newly inserted map entry in '*res'. In case the new 1606 * entry is coalesced with a neighbor or an existing entry was 1607 * resized, that entry is returned. In any case, the returned entry 1608 * covers the specified address range. 1609 */ 1610 static int 1611 vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1612 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow, 1613 vm_map_entry_t *res) 1614 { 1615 vm_map_entry_t new_entry, next_entry, prev_entry; 1616 struct ucred *cred; 1617 vm_eflags_t protoeflags; 1618 vm_inherit_t inheritance; 1619 u_long bdry; 1620 u_int bidx; 1621 1622 VM_MAP_ASSERT_LOCKED(map); 1623 KASSERT(object != kernel_object || 1624 (cow & MAP_COPY_ON_WRITE) == 0, 1625 ("vm_map_insert: kernel object and COW")); 1626 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 || 1627 (cow & MAP_SPLIT_BOUNDARY_MASK) != 0, 1628 ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x", 1629 object, cow)); 1630 KASSERT((prot & ~max) == 0, 1631 ("prot %#x is not subset of max_prot %#x", prot, max)); 1632 1633 /* 1634 * Check that the start and end points are not bogus. 1635 */ 1636 if (start == end || !vm_map_range_valid(map, start, end)) 1637 return (KERN_INVALID_ADDRESS); 1638 1639 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE | 1640 VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) 1641 return (KERN_PROTECTION_FAILURE); 1642 1643 /* 1644 * Find the entry prior to the proposed starting address; if it's part 1645 * of an existing entry, this range is bogus. 1646 */ 1647 if (vm_map_lookup_entry(map, start, &prev_entry)) 1648 return (KERN_NO_SPACE); 1649 1650 /* 1651 * Assert that the next entry doesn't overlap the end point. 1652 */ 1653 next_entry = vm_map_entry_succ(prev_entry); 1654 if (next_entry->start < end) 1655 return (KERN_NO_SPACE); 1656 1657 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || 1658 max != VM_PROT_NONE)) 1659 return (KERN_INVALID_ARGUMENT); 1660 1661 protoeflags = 0; 1662 if (cow & MAP_COPY_ON_WRITE) 1663 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 1664 if (cow & MAP_NOFAULT) 1665 protoeflags |= MAP_ENTRY_NOFAULT; 1666 if (cow & MAP_DISABLE_SYNCER) 1667 protoeflags |= MAP_ENTRY_NOSYNC; 1668 if (cow & MAP_DISABLE_COREDUMP) 1669 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1670 if (cow & MAP_STACK_AREA) 1671 protoeflags |= MAP_ENTRY_GROWS_DOWN; 1672 if (cow & MAP_WRITECOUNT) 1673 protoeflags |= MAP_ENTRY_WRITECNT; 1674 if (cow & MAP_VN_EXEC) 1675 protoeflags |= MAP_ENTRY_VN_EXEC; 1676 if ((cow & MAP_CREATE_GUARD) != 0) 1677 protoeflags |= MAP_ENTRY_GUARD; 1678 if ((cow & MAP_CREATE_STACK_GAP) != 0) 1679 protoeflags |= MAP_ENTRY_STACK_GAP; 1680 if (cow & MAP_INHERIT_SHARE) 1681 inheritance = VM_INHERIT_SHARE; 1682 else 1683 inheritance = VM_INHERIT_DEFAULT; 1684 if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) { 1685 /* This magically ignores index 0, for usual page size. 
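 *
 * Encoding example: a caller asking for the usual page size encodes
 * bidx == 0, which leaves MAP_SPLIT_BOUNDARY_MASK clear in "cow", so this
 * whole block is skipped; that is the "magic".  A nonzero index, say
 * bidx == 1 on a machine where pagesizes[1] is 2MB (illustrative), computes
 * bdry == 0x1fffff and therefore requires both start and end to be 2MB
 * aligned.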
*/ 1686 bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >> 1687 MAP_SPLIT_BOUNDARY_SHIFT; 1688 if (bidx >= MAXPAGESIZES) 1689 return (KERN_INVALID_ARGUMENT); 1690 bdry = pagesizes[bidx] - 1; 1691 if ((start & bdry) != 0 || (end & bdry) != 0) 1692 return (KERN_INVALID_ARGUMENT); 1693 protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 1694 } 1695 1696 cred = NULL; 1697 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0) 1698 goto charged; 1699 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 1700 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 1701 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 1702 return (KERN_RESOURCE_SHORTAGE); 1703 KASSERT(object == NULL || 1704 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 || 1705 object->cred == NULL, 1706 ("overcommit: vm_map_insert o %p", object)); 1707 cred = curthread->td_ucred; 1708 } 1709 1710 charged: 1711 /* Expand the kernel pmap, if necessary. */ 1712 if (map == kernel_map && end > kernel_vm_end) 1713 pmap_growkernel(end); 1714 if (object != NULL) { 1715 /* 1716 * OBJ_ONEMAPPING must be cleared unless this mapping 1717 * is trivially proven to be the only mapping for any 1718 * of the object's pages. (Object granularity 1719 * reference counting is insufficient to recognize 1720 * aliases with precision.) 1721 */ 1722 if ((object->flags & OBJ_ANON) != 0) { 1723 VM_OBJECT_WLOCK(object); 1724 if (object->ref_count > 1 || object->shadow_count != 0) 1725 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1726 VM_OBJECT_WUNLOCK(object); 1727 } 1728 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == 1729 protoeflags && 1730 (cow & (MAP_STACK_AREA | MAP_VN_EXEC)) == 0 && 1731 prev_entry->end == start && (prev_entry->cred == cred || 1732 (prev_entry->object.vm_object != NULL && 1733 prev_entry->object.vm_object->cred == cred)) && 1734 vm_object_coalesce(prev_entry->object.vm_object, 1735 prev_entry->offset, 1736 (vm_size_t)(prev_entry->end - prev_entry->start), 1737 (vm_size_t)(end - prev_entry->end), cred != NULL && 1738 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 1739 /* 1740 * We were able to extend the object. Determine if we 1741 * can extend the previous map entry to include the 1742 * new range as well. 1743 */ 1744 if (prev_entry->inheritance == inheritance && 1745 prev_entry->protection == prot && 1746 prev_entry->max_protection == max && 1747 prev_entry->wired_count == 0) { 1748 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == 1749 0, ("prev_entry %p has incoherent wiring", 1750 prev_entry)); 1751 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) 1752 map->size += end - prev_entry->end; 1753 vm_map_entry_resize(map, prev_entry, 1754 end - prev_entry->end); 1755 *res = vm_map_try_merge_entries(map, prev_entry, 1756 next_entry); 1757 return (KERN_SUCCESS); 1758 } 1759 1760 /* 1761 * If we can extend the object but cannot extend the 1762 * map entry, we have to create a new map entry. We 1763 * must bump the ref count on the extended object to 1764 * account for it. object may be NULL. 1765 */ 1766 object = prev_entry->object.vm_object; 1767 offset = prev_entry->offset + 1768 (prev_entry->end - prev_entry->start); 1769 vm_object_reference(object); 1770 if (cred != NULL && object != NULL && object->cred != NULL && 1771 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 1772 /* Object already accounts for this uid. 
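 *
 * Summary of the accounting rule (descriptive only): ENTRY_CHARGED() treats
 * an entry as charged if either the entry itself records a cred or its
 * object does and the entry does not need a copy.  Since the coalesced
 * object already carries the cred for these pages, the entry must not
 * record one as well, or the same pages would be counted twice against the
 * user's swap reservation.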
*/ 1773 cred = NULL; 1774 } 1775 } 1776 if (cred != NULL) 1777 crhold(cred); 1778 1779 /* 1780 * Create a new entry 1781 */ 1782 new_entry = vm_map_entry_create(map); 1783 new_entry->start = start; 1784 new_entry->end = end; 1785 new_entry->cred = NULL; 1786 1787 new_entry->eflags = protoeflags; 1788 new_entry->object.vm_object = object; 1789 new_entry->offset = offset; 1790 1791 new_entry->inheritance = inheritance; 1792 new_entry->protection = prot; 1793 new_entry->max_protection = max; 1794 new_entry->wired_count = 0; 1795 new_entry->wiring_thread = NULL; 1796 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 1797 new_entry->next_read = start; 1798 1799 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 1800 ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); 1801 new_entry->cred = cred; 1802 1803 /* 1804 * Insert the new entry into the list 1805 */ 1806 vm_map_entry_link(map, new_entry); 1807 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) 1808 map->size += new_entry->end - new_entry->start; 1809 1810 /* 1811 * Try to coalesce the new entry with both the previous and next 1812 * entries in the list. Previously, we only attempted to coalesce 1813 * with the previous entry when object is NULL. Here, we handle the 1814 * other cases, which are less common. 1815 */ 1816 vm_map_try_merge_entries(map, prev_entry, new_entry); 1817 *res = vm_map_try_merge_entries(map, new_entry, next_entry); 1818 1819 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1820 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1821 end - start, cow & MAP_PREFAULT_PARTIAL); 1822 } 1823 1824 return (KERN_SUCCESS); 1825 } 1826 1827 /* 1828 * vm_map_insert: 1829 * 1830 * Inserts the given VM object into the target map at the 1831 * specified address range. 1832 * 1833 * Requires that the map be locked, and leaves it so. 1834 * 1835 * If object is non-NULL, ref count must be bumped by caller 1836 * prior to making call to account for the new entry. 1837 */ 1838 int 1839 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1840 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1841 { 1842 vm_map_entry_t res; 1843 1844 return (vm_map_insert1(map, object, offset, start, end, prot, max, 1845 cow, &res)); 1846 } 1847 1848 /* 1849 * vm_map_findspace: 1850 * 1851 * Find the first fit (lowest VM address) for "length" free bytes 1852 * beginning at address >= start in the given map. 1853 * 1854 * In a vm_map_entry, "max_free" is the maximum amount of 1855 * contiguous free space between an entry in its subtree and a 1856 * neighbor of that entry. This allows finding a free region in 1857 * one path down the tree, so O(log n) amortized with splay 1858 * trees. 1859 * 1860 * The map must be locked, and leaves it so. 1861 * 1862 * Returns: starting address if sufficient space, 1863 * vm_map_max(map)-length+1 if insufficient space. 1864 */ 1865 vm_offset_t 1866 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length) 1867 { 1868 vm_map_entry_t header, llist, rlist, root, y; 1869 vm_size_t left_length, max_free_left, max_free_right; 1870 vm_offset_t gap_end; 1871 1872 VM_MAP_ASSERT_LOCKED(map); 1873 1874 /* 1875 * Request must fit within min/max VM address and must avoid 1876 * address wrap. 1877 */ 1878 start = MAX(start, vm_map_min(map)); 1879 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) 1880 return (vm_map_max(map) - length + 1); 1881 1882 /* Empty tree means wide open address space. 
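 *
 * For illustration, the usual allocation pattern built on this routine (a
 * hedged sketch with illustrative variable names; compare kmapent_alloc()
 * above and the vm_map_find() KPI):
 *
 *	vm_map_lock(map);
 *	addr = vm_map_findspace(map, vm_map_min(map), size);
 *	if (addr + size < addr || addr + size > vm_map_max(map)) {
 *		// no gap large enough; fail or sleep and retry
 *	} else {
 *		rv = vm_map_insert(map, obj, 0, addr, addr + size,
 *		    prot, prot, 0);
 *	}
 *	vm_map_unlock(map);
 *
 * Nothing reserves the returned range, so the insertion must happen before
 * the map lock is dropped.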
*/ 1883 if (map->root == NULL) 1884 return (start); 1885 1886 /* 1887 * After splay_split, if start is within an entry, push it to the start 1888 * of the following gap. If rlist is at the end of the gap containing 1889 * start, save the end of that gap in gap_end to see if the gap is big 1890 * enough; otherwise set gap_end to start skip gap-checking and move 1891 * directly to a search of the right subtree. 1892 */ 1893 header = &map->header; 1894 root = vm_map_splay_split(map, start, length, &llist, &rlist); 1895 gap_end = rlist->start; 1896 if (root != NULL) { 1897 start = root->end; 1898 if (root->right != rlist) 1899 gap_end = start; 1900 max_free_left = vm_map_splay_merge_left(header, root, llist); 1901 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1902 } else if (rlist != header) { 1903 root = rlist; 1904 rlist = root->left; 1905 max_free_left = vm_map_splay_merge_pred(header, root, llist); 1906 max_free_right = vm_map_splay_merge_right(header, root, rlist); 1907 } else { 1908 root = llist; 1909 llist = root->right; 1910 max_free_left = vm_map_splay_merge_left(header, root, llist); 1911 max_free_right = vm_map_splay_merge_succ(header, root, rlist); 1912 } 1913 root->max_free = vm_size_max(max_free_left, max_free_right); 1914 map->root = root; 1915 VM_MAP_ASSERT_CONSISTENT(map); 1916 if (length <= gap_end - start) 1917 return (start); 1918 1919 /* With max_free, can immediately tell if no solution. */ 1920 if (root->right == header || length > root->right->max_free) 1921 return (vm_map_max(map) - length + 1); 1922 1923 /* 1924 * Splay for the least large-enough gap in the right subtree. 1925 */ 1926 llist = rlist = header; 1927 for (left_length = 0;; 1928 left_length = vm_map_entry_max_free_left(root, llist)) { 1929 if (length <= left_length) 1930 SPLAY_LEFT_STEP(root, y, llist, rlist, 1931 length <= vm_map_entry_max_free_left(y, llist)); 1932 else 1933 SPLAY_RIGHT_STEP(root, y, llist, rlist, 1934 length > vm_map_entry_max_free_left(y, root)); 1935 if (root == NULL) 1936 break; 1937 } 1938 root = llist; 1939 llist = root->right; 1940 max_free_left = vm_map_splay_merge_left(header, root, llist); 1941 if (rlist == header) { 1942 root->max_free = vm_size_max(max_free_left, 1943 vm_map_splay_merge_succ(header, root, rlist)); 1944 } else { 1945 y = rlist; 1946 rlist = y->left; 1947 y->max_free = vm_size_max( 1948 vm_map_splay_merge_pred(root, y, root), 1949 vm_map_splay_merge_right(header, y, rlist)); 1950 root->max_free = vm_size_max(max_free_left, y->max_free); 1951 } 1952 map->root = root; 1953 VM_MAP_ASSERT_CONSISTENT(map); 1954 return (root->end); 1955 } 1956 1957 int 1958 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1959 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1960 vm_prot_t max, int cow) 1961 { 1962 vm_offset_t end; 1963 int result; 1964 1965 end = start + length; 1966 KASSERT((cow & MAP_STACK_AREA) == 0 || object == NULL, 1967 ("vm_map_fixed: non-NULL backing object for stack")); 1968 vm_map_lock(map); 1969 VM_MAP_RANGE_CHECK(map, start, end); 1970 if ((cow & MAP_CHECK_EXCL) == 0) { 1971 result = vm_map_delete(map, start, end); 1972 if (result != KERN_SUCCESS) 1973 goto out; 1974 } 1975 if ((cow & MAP_STACK_AREA) != 0) { 1976 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1977 prot, max, cow); 1978 } else { 1979 result = vm_map_insert(map, object, offset, start, end, 1980 prot, max, cow); 1981 } 1982 out: 1983 vm_map_unlock(map); 1984 return (result); 1985 } 1986 1987 #if VM_NRESERVLEVEL <= 1 1988 static const int 
aslr_pages_rnd_64[2] = {0x1000, 0x10}; 1989 static const int aslr_pages_rnd_32[2] = {0x100, 0x4}; 1990 #elif VM_NRESERVLEVEL == 2 1991 static const int aslr_pages_rnd_64[3] = {0x1000, 0x1000, 0x10}; 1992 static const int aslr_pages_rnd_32[3] = {0x100, 0x100, 0x4}; 1993 #else 1994 #error "Unsupported VM_NRESERVLEVEL" 1995 #endif 1996 1997 static int cluster_anon = 1; 1998 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW, 1999 &cluster_anon, 0, 2000 "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always"); 2001 2002 static bool 2003 clustering_anon_allowed(vm_offset_t addr, int cow) 2004 { 2005 2006 switch (cluster_anon) { 2007 case 0: 2008 return (false); 2009 case 1: 2010 return (addr == 0 || (cow & MAP_NO_HINT) != 0); 2011 case 2: 2012 default: 2013 return (true); 2014 } 2015 } 2016 2017 static long aslr_restarts; 2018 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD, 2019 &aslr_restarts, 0, 2020 "Number of aslr failures"); 2021 2022 /* 2023 * Searches for the specified amount of free space in the given map with the 2024 * specified alignment. Performs an address-ordered, first-fit search from 2025 * the given address "*addr", with an optional upper bound "max_addr". If the 2026 * parameter "alignment" is zero, then the alignment is computed from the 2027 * given (object, offset) pair so as to enable the greatest possible use of 2028 * superpage mappings. Returns KERN_SUCCESS and the address of the free space 2029 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE. 2030 * 2031 * The map must be locked. Initially, there must be at least "length" bytes 2032 * of free space at the given address. 2033 */ 2034 static int 2035 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2036 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr, 2037 vm_offset_t alignment) 2038 { 2039 vm_offset_t aligned_addr, free_addr; 2040 2041 VM_MAP_ASSERT_LOCKED(map); 2042 free_addr = *addr; 2043 KASSERT(free_addr == vm_map_findspace(map, free_addr, length), 2044 ("caller failed to provide space %#jx at address %p", 2045 (uintmax_t)length, (void *)free_addr)); 2046 for (;;) { 2047 /* 2048 * At the start of every iteration, the free space at address 2049 * "*addr" is at least "length" bytes. 2050 */ 2051 if (alignment == 0) 2052 pmap_align_superpage(object, offset, addr, length); 2053 else 2054 *addr = roundup2(*addr, alignment); 2055 aligned_addr = *addr; 2056 if (aligned_addr == free_addr) { 2057 /* 2058 * Alignment did not change "*addr", so "*addr" must 2059 * still provide sufficient free space. 2060 */ 2061 return (KERN_SUCCESS); 2062 } 2063 2064 /* 2065 * Test for address wrap on "*addr". A wrapped "*addr" could 2066 * be a valid address, in which case vm_map_findspace() cannot 2067 * be relied upon to fail. 2068 */ 2069 if (aligned_addr < free_addr) 2070 return (KERN_NO_SPACE); 2071 *addr = vm_map_findspace(map, aligned_addr, length); 2072 if (*addr + length > vm_map_max(map) || 2073 (max_addr != 0 && *addr + length > max_addr)) 2074 return (KERN_NO_SPACE); 2075 free_addr = *addr; 2076 if (free_addr == aligned_addr) { 2077 /* 2078 * If a successful call to vm_map_findspace() did not 2079 * change "*addr", then "*addr" must still be aligned 2080 * and provide sufficient free space. 2081 */ 2082 return (KERN_SUCCESS); 2083 } 2084 } 2085 } 2086 2087 int 2088 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length, 2089 vm_offset_t max_addr, vm_offset_t alignment) 2090 { 2091 /* XXXKIB ASLR eh ? 
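 * Note that this helper applies no randomization of its own; it is a
 * plain first-fit search followed by alignment.  Callers that want
 * ASLR are expected to go through vm_map_find_locked(), which adds a
 * randomized gap before falling back to a search like this one.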
*/ 2092 *addr = vm_map_findspace(map, *addr, length); 2093 if (*addr + length > vm_map_max(map) || 2094 (max_addr != 0 && *addr + length > max_addr)) 2095 return (KERN_NO_SPACE); 2096 return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr, 2097 alignment)); 2098 } 2099 2100 /* 2101 * vm_map_find finds an unallocated region in the target address 2102 * map with the given length. The search is defined to be 2103 * first-fit from the specified address; the region found is 2104 * returned in the same parameter. 2105 * 2106 * If object is non-NULL, ref count must be bumped by caller 2107 * prior to making call to account for the new entry. 2108 */ 2109 int 2110 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2111 vm_offset_t *addr, /* IN/OUT */ 2112 vm_size_t length, vm_offset_t max_addr, int find_space, 2113 vm_prot_t prot, vm_prot_t max, int cow) 2114 { 2115 int rv; 2116 2117 vm_map_lock(map); 2118 rv = vm_map_find_locked(map, object, offset, addr, length, max_addr, 2119 find_space, prot, max, cow); 2120 vm_map_unlock(map); 2121 return (rv); 2122 } 2123 2124 int 2125 vm_map_find_locked(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2126 vm_offset_t *addr, /* IN/OUT */ 2127 vm_size_t length, vm_offset_t max_addr, int find_space, 2128 vm_prot_t prot, vm_prot_t max, int cow) 2129 { 2130 vm_offset_t alignment, curr_min_addr, min_addr; 2131 int gap, pidx, rv, try; 2132 bool cluster, en_aslr, update_anon; 2133 2134 KASSERT((cow & MAP_STACK_AREA) == 0 || object == NULL, 2135 ("non-NULL backing object for stack")); 2136 MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE && 2137 (cow & MAP_STACK_AREA) == 0)); 2138 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 2139 (object->flags & OBJ_COLORED) == 0)) 2140 find_space = VMFS_ANY_SPACE; 2141 if (find_space >> 8 != 0) { 2142 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 2143 alignment = (vm_offset_t)1 << (find_space >> 8); 2144 } else 2145 alignment = 0; 2146 en_aslr = (map->flags & MAP_ASLR) != 0; 2147 update_anon = cluster = clustering_anon_allowed(*addr, cow) && 2148 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && 2149 find_space != VMFS_NO_SPACE && object == NULL && 2150 (cow & (MAP_INHERIT_SHARE | MAP_STACK_AREA)) == 0 && 2151 prot != PROT_NONE; 2152 curr_min_addr = min_addr = *addr; 2153 if (en_aslr && min_addr == 0 && !cluster && 2154 find_space != VMFS_NO_SPACE && 2155 (map->flags & MAP_ASLR_IGNSTART) != 0) 2156 curr_min_addr = min_addr = vm_map_min(map); 2157 try = 0; 2158 if (cluster) { 2159 curr_min_addr = map->anon_loc; 2160 if (curr_min_addr == 0) 2161 cluster = false; 2162 } 2163 if (find_space != VMFS_NO_SPACE) { 2164 KASSERT(find_space == VMFS_ANY_SPACE || 2165 find_space == VMFS_OPTIMAL_SPACE || 2166 find_space == VMFS_SUPER_SPACE || 2167 alignment != 0, ("unexpected VMFS flag")); 2168 again: 2169 /* 2170 * When creating an anonymous mapping, try clustering 2171 * with an existing anonymous mapping first. 2172 * 2173 * We make up to two attempts to find address space 2174 * for a given find_space value. The first attempt may 2175 * apply randomization or may cluster with an existing 2176 * anonymous mapping. If this first attempt fails, 2177 * perform a first-fit search of the available address 2178 * space. 2179 * 2180 * If all tries failed, and find_space is 2181 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE. 2182 * Again enable clustering and randomization. 
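 * That is, at most two passes are made for a given find_space value
 * (the MPASS below enforces this), plus at most one more pair of
 * passes after the VMFS_OPTIMAL_SPACE fallback resets the try counter.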
2183 */ 2184 try++; 2185 MPASS(try <= 2); 2186 2187 if (try == 2) { 2188 /* 2189 * Second try: we failed either to find a 2190 * suitable region for randomizing the 2191 * allocation, or to cluster with an existing 2192 * mapping. Retry with free run. 2193 */ 2194 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? 2195 vm_map_min(map) : min_addr; 2196 atomic_add_long(&aslr_restarts, 1); 2197 } 2198 2199 if (try == 1 && en_aslr && !cluster) { 2200 /* 2201 * Find space for allocation, including 2202 * gap needed for later randomization. 2203 */ 2204 pidx = 0; 2205 #if VM_NRESERVLEVEL > 0 2206 if ((find_space == VMFS_SUPER_SPACE || 2207 find_space == VMFS_OPTIMAL_SPACE) && 2208 pagesizes[VM_NRESERVLEVEL] != 0) { 2209 /* 2210 * Do not pointlessly increase the space that 2211 * is requested from vm_map_findspace(). 2212 * pmap_align_superpage() will only change a 2213 * mapping's alignment if that mapping is at 2214 * least a superpage in size. 2215 */ 2216 pidx = VM_NRESERVLEVEL; 2217 while (pidx > 0 && length < pagesizes[pidx]) 2218 pidx--; 2219 } 2220 #endif 2221 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR && 2222 (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ? 2223 aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx]; 2224 *addr = vm_map_findspace(map, curr_min_addr, 2225 length + gap * pagesizes[pidx]); 2226 if (*addr + length + gap * pagesizes[pidx] > 2227 vm_map_max(map)) 2228 goto again; 2229 /* And randomize the start address. */ 2230 *addr += (arc4random() % gap) * pagesizes[pidx]; 2231 if (max_addr != 0 && *addr + length > max_addr) 2232 goto again; 2233 } else { 2234 *addr = vm_map_findspace(map, curr_min_addr, length); 2235 if (*addr + length > vm_map_max(map) || 2236 (max_addr != 0 && *addr + length > max_addr)) { 2237 if (cluster) { 2238 cluster = false; 2239 MPASS(try == 1); 2240 goto again; 2241 } 2242 return (KERN_NO_SPACE); 2243 } 2244 } 2245 2246 if (find_space != VMFS_ANY_SPACE && 2247 (rv = vm_map_alignspace(map, object, offset, addr, length, 2248 max_addr, alignment)) != KERN_SUCCESS) { 2249 if (find_space == VMFS_OPTIMAL_SPACE) { 2250 find_space = VMFS_ANY_SPACE; 2251 curr_min_addr = min_addr; 2252 cluster = update_anon; 2253 try = 0; 2254 goto again; 2255 } 2256 return (rv); 2257 } 2258 } else if ((cow & MAP_REMAP) != 0) { 2259 if (!vm_map_range_valid(map, *addr, *addr + length)) 2260 return (KERN_INVALID_ADDRESS); 2261 rv = vm_map_delete(map, *addr, *addr + length); 2262 if (rv != KERN_SUCCESS) 2263 return (rv); 2264 } 2265 if ((cow & MAP_STACK_AREA) != 0) { 2266 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 2267 max, cow); 2268 } else { 2269 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 2270 prot, max, cow); 2271 } 2272 2273 /* 2274 * Update the starting address for clustered anonymous memory mappings 2275 * if a starting address was not previously defined or an ASLR restart 2276 * placed an anonymous memory mapping at a lower address. 2277 */ 2278 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || 2279 *addr < map->anon_loc)) 2280 map->anon_loc = *addr; 2281 return (rv); 2282 } 2283 2284 /* 2285 * vm_map_find_min() is a variant of vm_map_find() that takes an 2286 * additional parameter ("default_addr") and treats the given address 2287 * ("*addr") differently. Specifically, it treats "*addr" as a hint 2288 * and not as the minimum address where the mapping is created. 2289 * 2290 * This function works in two phases. First, it tries to 2291 * allocate above the hint. 
If that fails and the hint is 2292 * greater than "default_addr", it performs a second pass, replacing 2293 * the hint with "default_addr" as the minimum address for the 2294 * allocation. 2295 */ 2296 int 2297 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2298 vm_offset_t *addr, vm_size_t length, vm_offset_t default_addr, 2299 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 2300 int cow) 2301 { 2302 vm_offset_t hint; 2303 int rv; 2304 2305 hint = *addr; 2306 if (hint == 0) { 2307 cow |= MAP_NO_HINT; 2308 *addr = hint = default_addr; 2309 } 2310 for (;;) { 2311 rv = vm_map_find(map, object, offset, addr, length, max_addr, 2312 find_space, prot, max, cow); 2313 if (rv == KERN_SUCCESS || default_addr >= hint) 2314 return (rv); 2315 *addr = hint = default_addr; 2316 } 2317 } 2318 2319 /* 2320 * A map entry with any of the following flags set must not be merged with 2321 * another entry. 2322 */ 2323 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | \ 2324 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \ 2325 MAP_ENTRY_STACK_GAP) 2326 2327 static bool 2328 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 2329 { 2330 2331 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 2332 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 2333 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 2334 prev, entry)); 2335 return (prev->end == entry->start && 2336 prev->object.vm_object == entry->object.vm_object && 2337 (prev->object.vm_object == NULL || 2338 prev->offset + (prev->end - prev->start) == entry->offset) && 2339 prev->eflags == entry->eflags && 2340 prev->protection == entry->protection && 2341 prev->max_protection == entry->max_protection && 2342 prev->inheritance == entry->inheritance && 2343 prev->wired_count == entry->wired_count && 2344 prev->cred == entry->cred); 2345 } 2346 2347 static void 2348 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 2349 { 2350 2351 /* 2352 * If the backing object is a vnode object, vm_object_deallocate() 2353 * calls vrele(). However, vrele() does not lock the vnode because 2354 * the vnode has additional references. Thus, the map lock can be 2355 * kept without causing a lock-order reversal with the vnode lock. 2356 * 2357 * Since we count the number of virtual page mappings in 2358 * object->un_pager.vnp.writemappings, the writemappings value 2359 * should not be adjusted when the entry is disposed of. 2360 */ 2361 if (entry->object.vm_object != NULL) 2362 vm_object_deallocate(entry->object.vm_object); 2363 if (entry->cred != NULL) 2364 crfree(entry->cred); 2365 vm_map_entry_dispose(map, entry); 2366 } 2367 2368 /* 2369 * vm_map_try_merge_entries: 2370 * 2371 * Compare two map entries that represent consecutive ranges. If 2372 * the entries can be merged, expand the range of the second to 2373 * cover the range of the first and delete the first. Then return 2374 * the map entry that includes the first range. 2375 * 2376 * The map must be locked. 
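 * Entries are merged only when they are adjacent, reference the same
 * object at consecutive offsets, and agree in eflags, protection,
 * max_protection, inheritance, wired_count, and cred, and when neither
 * entry carries a flag from MAP_ENTRY_NOMERGE_MASK; see
 * vm_map_mergeable_neighbors() above.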
2377 */ 2378 vm_map_entry_t 2379 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry, 2380 vm_map_entry_t entry) 2381 { 2382 2383 VM_MAP_ASSERT_LOCKED(map); 2384 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && 2385 vm_map_mergeable_neighbors(prev_entry, entry)) { 2386 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT); 2387 vm_map_merged_neighbor_dispose(map, prev_entry); 2388 return (entry); 2389 } 2390 return (prev_entry); 2391 } 2392 2393 /* 2394 * vm_map_entry_back: 2395 * 2396 * Allocate an object to back a map entry. 2397 */ 2398 static inline void 2399 vm_map_entry_back(vm_map_entry_t entry) 2400 { 2401 vm_object_t object; 2402 2403 KASSERT(entry->object.vm_object == NULL, 2404 ("map entry %p has backing object", entry)); 2405 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2406 ("map entry %p is a submap", entry)); 2407 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, 2408 entry->cred, entry->end - entry->start); 2409 entry->object.vm_object = object; 2410 entry->offset = 0; 2411 entry->cred = NULL; 2412 } 2413 2414 /* 2415 * vm_map_entry_charge_object 2416 * 2417 * If there is no object backing this entry, create one. Otherwise, if 2418 * the entry has cred, give it to the backing object. 2419 */ 2420 static inline void 2421 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) 2422 { 2423 2424 VM_MAP_ASSERT_LOCKED(map); 2425 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2426 ("map entry %p is a submap", entry)); 2427 if (entry->object.vm_object == NULL && !map->system_map && 2428 (entry->eflags & MAP_ENTRY_GUARD) == 0) 2429 vm_map_entry_back(entry); 2430 else if (entry->object.vm_object != NULL && 2431 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2432 entry->cred != NULL) { 2433 VM_OBJECT_WLOCK(entry->object.vm_object); 2434 KASSERT(entry->object.vm_object->cred == NULL, 2435 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); 2436 entry->object.vm_object->cred = entry->cred; 2437 entry->object.vm_object->charge = entry->end - entry->start; 2438 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2439 entry->cred = NULL; 2440 } 2441 } 2442 2443 /* 2444 * vm_map_entry_clone 2445 * 2446 * Create a duplicate map entry for clipping. 2447 */ 2448 static vm_map_entry_t 2449 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry) 2450 { 2451 vm_map_entry_t new_entry; 2452 2453 VM_MAP_ASSERT_LOCKED(map); 2454 2455 /* 2456 * Create a backing object now, if none exists, so that more individual 2457 * objects won't be created after the map entry is split. 2458 */ 2459 vm_map_entry_charge_object(map, entry); 2460 2461 /* Clone the entry. */ 2462 new_entry = vm_map_entry_create(map); 2463 *new_entry = *entry; 2464 if (new_entry->cred != NULL) 2465 crhold(entry->cred); 2466 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2467 vm_object_reference(new_entry->object.vm_object); 2468 vm_map_entry_set_vnode_text(new_entry, true); 2469 /* 2470 * The object->un_pager.vnp.writemappings for the object of 2471 * MAP_ENTRY_WRITECNT type entry shall be kept as is here. The 2472 * virtual pages are re-distributed among the clipped entries, 2473 * so the sum is left the same. 2474 */ 2475 } 2476 return (new_entry); 2477 } 2478 2479 /* 2480 * vm_map_clip_start: [ internal use only ] 2481 * 2482 * Asserts that the given entry begins at or after 2483 * the specified address; if necessary, 2484 * it splits the entry into two. 
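 * For example, clipping an entry that covers [A, C) at address B,
 * where A < B < C, links a new entry for the front portion [A, B)
 * before the original entry, which is left covering [B, C);
 * vm_map_entry_clone() duplicates the object reference and any cred
 * hold for the new entry.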
2485 */ 2486 static int 2487 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr) 2488 { 2489 vm_map_entry_t new_entry; 2490 int bdry_idx; 2491 2492 if (!map->system_map) 2493 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2494 "%s: map %p entry %p start 0x%jx", __func__, map, entry, 2495 (uintmax_t)startaddr); 2496 2497 if (startaddr <= entry->start) 2498 return (KERN_SUCCESS); 2499 2500 VM_MAP_ASSERT_LOCKED(map); 2501 KASSERT(entry->end > startaddr && entry->start < startaddr, 2502 ("%s: invalid clip of entry %p", __func__, entry)); 2503 2504 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2505 if (bdry_idx != 0) { 2506 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) 2507 return (KERN_INVALID_ARGUMENT); 2508 } 2509 2510 new_entry = vm_map_entry_clone(map, entry); 2511 2512 /* 2513 * Split off the front portion. Insert the new entry BEFORE this one, 2514 * so that this entry has the specified starting address. 2515 */ 2516 new_entry->end = startaddr; 2517 vm_map_entry_link(map, new_entry); 2518 return (KERN_SUCCESS); 2519 } 2520 2521 /* 2522 * vm_map_lookup_clip_start: 2523 * 2524 * Find the entry at or just after 'start', and clip it if 'start' is in 2525 * the interior of the entry. Return entry after 'start', and in 2526 * prev_entry set the entry before 'start'. 2527 */ 2528 static int 2529 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start, 2530 vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry) 2531 { 2532 vm_map_entry_t entry; 2533 int rv; 2534 2535 if (!map->system_map) 2536 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2537 "%s: map %p start 0x%jx prev %p", __func__, map, 2538 (uintmax_t)start, prev_entry); 2539 2540 if (vm_map_lookup_entry(map, start, prev_entry)) { 2541 entry = *prev_entry; 2542 rv = vm_map_clip_start(map, entry, start); 2543 if (rv != KERN_SUCCESS) 2544 return (rv); 2545 *prev_entry = vm_map_entry_pred(entry); 2546 } else 2547 entry = vm_map_entry_succ(*prev_entry); 2548 *res_entry = entry; 2549 return (KERN_SUCCESS); 2550 } 2551 2552 /* 2553 * vm_map_clip_end: [ internal use only ] 2554 * 2555 * Asserts that the given entry ends at or before 2556 * the specified address; if necessary, 2557 * it splits the entry into two. 2558 */ 2559 static int 2560 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr) 2561 { 2562 vm_map_entry_t new_entry; 2563 int bdry_idx; 2564 2565 if (!map->system_map) 2566 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2567 "%s: map %p entry %p end 0x%jx", __func__, map, entry, 2568 (uintmax_t)endaddr); 2569 2570 if (endaddr >= entry->end) 2571 return (KERN_SUCCESS); 2572 2573 VM_MAP_ASSERT_LOCKED(map); 2574 KASSERT(entry->start < endaddr && entry->end > endaddr, 2575 ("%s: invalid clip of entry %p", __func__, entry)); 2576 2577 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 2578 if (bdry_idx != 0) { 2579 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) 2580 return (KERN_INVALID_ARGUMENT); 2581 } 2582 2583 new_entry = vm_map_entry_clone(map, entry); 2584 2585 /* 2586 * Split off the back portion. Insert the new entry AFTER this one, 2587 * so that this entry has the specified ending address. 2588 */ 2589 new_entry->start = endaddr; 2590 vm_map_entry_link(map, new_entry); 2591 2592 return (KERN_SUCCESS); 2593 } 2594 2595 /* 2596 * vm_map_submap: [ kernel use only ] 2597 * 2598 * Mark the given range as handled by a subordinate map. 
2599 * 2600 * This range must have been created with vm_map_find, 2601 * and no other operations may have been performed on this 2602 * range prior to calling vm_map_submap. 2603 * 2604 * Only a limited number of operations can be performed 2605 * within this rage after calling vm_map_submap: 2606 * vm_fault 2607 * [Don't try vm_map_copy!] 2608 * 2609 * To remove a submapping, one must first remove the 2610 * range from the superior map, and then destroy the 2611 * submap (if desired). [Better yet, don't try it.] 2612 */ 2613 int 2614 vm_map_submap( 2615 vm_map_t map, 2616 vm_offset_t start, 2617 vm_offset_t end, 2618 vm_map_t submap) 2619 { 2620 vm_map_entry_t entry; 2621 int result; 2622 2623 result = KERN_INVALID_ARGUMENT; 2624 2625 vm_map_lock(submap); 2626 submap->flags |= MAP_IS_SUB_MAP; 2627 vm_map_unlock(submap); 2628 2629 vm_map_lock(map); 2630 VM_MAP_RANGE_CHECK(map, start, end); 2631 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && 2632 (entry->eflags & MAP_ENTRY_COW) == 0 && 2633 entry->object.vm_object == NULL) { 2634 result = vm_map_clip_start(map, entry, start); 2635 if (result != KERN_SUCCESS) 2636 goto unlock; 2637 result = vm_map_clip_end(map, entry, end); 2638 if (result != KERN_SUCCESS) 2639 goto unlock; 2640 entry->object.sub_map = submap; 2641 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 2642 result = KERN_SUCCESS; 2643 } 2644 unlock: 2645 vm_map_unlock(map); 2646 2647 if (result != KERN_SUCCESS) { 2648 vm_map_lock(submap); 2649 submap->flags &= ~MAP_IS_SUB_MAP; 2650 vm_map_unlock(submap); 2651 } 2652 return (result); 2653 } 2654 2655 /* 2656 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 2657 */ 2658 #define MAX_INIT_PT 96 2659 2660 /* 2661 * vm_map_pmap_enter: 2662 * 2663 * Preload the specified map's pmap with mappings to the specified 2664 * object's memory-resident pages. No further physical pages are 2665 * allocated, and no further virtual pages are retrieved from secondary 2666 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 2667 * limited number of page mappings are created at the low-end of the 2668 * specified address range. (For this purpose, a superpage mapping 2669 * counts as one page mapping.) Otherwise, all resident pages within 2670 * the specified address range are mapped. 2671 */ 2672 static void 2673 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2674 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2675 { 2676 vm_offset_t start; 2677 vm_page_t p, p_start; 2678 vm_pindex_t mask, psize, threshold, tmpidx; 2679 int psind; 2680 2681 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2682 return; 2683 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2684 VM_OBJECT_WLOCK(object); 2685 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2686 pmap_object_init_pt(map->pmap, addr, object, pindex, 2687 size); 2688 VM_OBJECT_WUNLOCK(object); 2689 return; 2690 } 2691 VM_OBJECT_LOCK_DOWNGRADE(object); 2692 } else 2693 VM_OBJECT_RLOCK(object); 2694 2695 psize = atop(size); 2696 if (psize + pindex > object->size) { 2697 if (pindex >= object->size) { 2698 VM_OBJECT_RUNLOCK(object); 2699 return; 2700 } 2701 psize = object->size - pindex; 2702 } 2703 2704 start = 0; 2705 p_start = NULL; 2706 threshold = MAX_INIT_PT; 2707 2708 p = vm_page_find_least(object, pindex); 2709 /* 2710 * Assert: the variable p is either (1) the page with the 2711 * least pindex greater than or equal to the parameter pindex 2712 * or (2) NULL. 
2713 */ 2714 for (; 2715 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2716 p = TAILQ_NEXT(p, listq)) { 2717 /* 2718 * don't allow an madvise to blow away our really 2719 * free pages allocating pv entries. 2720 */ 2721 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2722 vm_page_count_severe()) || 2723 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2724 tmpidx >= threshold)) { 2725 psize = tmpidx; 2726 break; 2727 } 2728 if (vm_page_all_valid(p)) { 2729 if (p_start == NULL) { 2730 start = addr + ptoa(tmpidx); 2731 p_start = p; 2732 } 2733 /* Jump ahead if a superpage mapping is possible. */ 2734 for (psind = p->psind; psind > 0; psind--) { 2735 if (((addr + ptoa(tmpidx)) & 2736 (pagesizes[psind] - 1)) == 0) { 2737 mask = atop(pagesizes[psind]) - 1; 2738 if (tmpidx + mask < psize && 2739 vm_page_ps_test(p, psind, 2740 PS_ALL_VALID, NULL)) { 2741 p += mask; 2742 threshold += mask; 2743 break; 2744 } 2745 } 2746 } 2747 } else if (p_start != NULL) { 2748 pmap_enter_object(map->pmap, start, addr + 2749 ptoa(tmpidx), p_start, prot); 2750 p_start = NULL; 2751 } 2752 } 2753 if (p_start != NULL) 2754 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2755 p_start, prot); 2756 VM_OBJECT_RUNLOCK(object); 2757 } 2758 2759 static void 2760 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot, 2761 vm_prot_t new_maxprot, int flags) 2762 { 2763 vm_prot_t old_prot; 2764 2765 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); 2766 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) 2767 return; 2768 2769 old_prot = PROT_EXTRACT(entry->offset); 2770 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2771 entry->offset = PROT_MAX(new_maxprot) | 2772 (new_maxprot & old_prot); 2773 } 2774 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) { 2775 entry->offset = new_prot | PROT_MAX( 2776 PROT_MAX_EXTRACT(entry->offset)); 2777 } 2778 } 2779 2780 /* 2781 * vm_map_protect: 2782 * 2783 * Sets the protection and/or the maximum protection of the 2784 * specified address region in the target map. 2785 */ 2786 int 2787 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2788 vm_prot_t new_prot, vm_prot_t new_maxprot, int flags) 2789 { 2790 vm_map_entry_t entry, first_entry, in_tran, prev_entry; 2791 vm_object_t obj; 2792 struct ucred *cred; 2793 vm_offset_t orig_start; 2794 vm_prot_t check_prot, max_prot, old_prot; 2795 int rv; 2796 2797 if (start == end) 2798 return (KERN_SUCCESS); 2799 2800 if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT | 2801 VM_MAP_PROTECT_SET_MAXPROT) && 2802 !CONTAINS_BITS(new_maxprot, new_prot)) 2803 return (KERN_OUT_OF_BOUNDS); 2804 2805 orig_start = start; 2806 again: 2807 in_tran = NULL; 2808 start = orig_start; 2809 vm_map_lock(map); 2810 2811 if ((map->flags & MAP_WXORX) != 0 && 2812 (flags & VM_MAP_PROTECT_SET_PROT) != 0 && 2813 CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) { 2814 vm_map_unlock(map); 2815 return (KERN_PROTECTION_FAILURE); 2816 } 2817 2818 /* 2819 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2820 * need to fault pages into the map and will drop the map lock while 2821 * doing so, and the VM object may end up in an inconsistent state if we 2822 * update the protection on the map entry in between faults. 
2823 */ 2824 vm_map_wait_busy(map); 2825 2826 VM_MAP_RANGE_CHECK(map, start, end); 2827 2828 if (!vm_map_lookup_entry(map, start, &first_entry)) 2829 first_entry = vm_map_entry_succ(first_entry); 2830 2831 if ((flags & VM_MAP_PROTECT_GROWSDOWN) != 0 && 2832 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { 2833 /* 2834 * Handle Linux's PROT_GROWSDOWN flag. 2835 * It means that protection is applied down to the 2836 * whole stack, including the specified range of the 2837 * mapped region, and the grow-down region (AKA 2838 * guard). 2839 */ 2840 while (!CONTAINS_BITS(first_entry->eflags, 2841 MAP_ENTRY_GUARD | MAP_ENTRY_STACK_GAP) && 2842 first_entry != vm_map_entry_first(map)) 2843 first_entry = vm_map_entry_pred(first_entry); 2844 start = first_entry->start; 2845 } 2846 2847 /* 2848 * Make a first pass to check for protection violations. 2849 */ 2850 check_prot = 0; 2851 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2852 check_prot |= new_prot; 2853 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) 2854 check_prot |= new_maxprot; 2855 for (entry = first_entry; entry->start < end; 2856 entry = vm_map_entry_succ(entry)) { 2857 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 2858 vm_map_unlock(map); 2859 return (KERN_INVALID_ARGUMENT); 2860 } 2861 if ((entry->eflags & (MAP_ENTRY_GUARD | 2862 MAP_ENTRY_STACK_GAP)) == MAP_ENTRY_GUARD) 2863 continue; 2864 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ? 2865 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; 2866 if (!CONTAINS_BITS(max_prot, check_prot)) { 2867 vm_map_unlock(map); 2868 return (KERN_PROTECTION_FAILURE); 2869 } 2870 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2871 in_tran = entry; 2872 } 2873 2874 /* 2875 * Postpone the operation until all in-transition map entries have 2876 * stabilized. An in-transition entry might already have its pages 2877 * wired and wired_count incremented, but not yet have its 2878 * MAP_ENTRY_USER_WIRED flag set, in which case we would fail to call 2879 * vm_fault_copy_entry() in the final loop below. 2880 */ 2881 if (in_tran != NULL) { 2882 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2883 vm_map_unlock_and_wait(map, 0); 2884 goto again; 2885 } 2886 2887 /* 2888 * Before changing the protections, try to reserve swap space for any 2889 * private (i.e., copy-on-write) mappings that are transitioning from 2890 * read-only to read/write access. If a reservation fails, break out 2891 * of this loop early and let the next loop simplify the entries, since 2892 * some may now be mergeable.
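 * An entry is skipped below when VM_MAP_PROTECT_SET_PROT was not
 * requested, when it is not actually gaining write access, when it is
 * already charged, or when it is a guard entry.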
2893 */ 2894 rv = vm_map_clip_start(map, first_entry, start); 2895 if (rv != KERN_SUCCESS) { 2896 vm_map_unlock(map); 2897 return (rv); 2898 } 2899 for (entry = first_entry; entry->start < end; 2900 entry = vm_map_entry_succ(entry)) { 2901 rv = vm_map_clip_end(map, entry, end); 2902 if (rv != KERN_SUCCESS) { 2903 vm_map_unlock(map); 2904 return (rv); 2905 } 2906 2907 if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 || 2908 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || 2909 ENTRY_CHARGED(entry) || 2910 (entry->eflags & MAP_ENTRY_GUARD) != 0) 2911 continue; 2912 2913 cred = curthread->td_ucred; 2914 obj = entry->object.vm_object; 2915 2916 if (obj == NULL || 2917 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { 2918 if (!swap_reserve(entry->end - entry->start)) { 2919 rv = KERN_RESOURCE_SHORTAGE; 2920 end = entry->end; 2921 break; 2922 } 2923 crhold(cred); 2924 entry->cred = cred; 2925 continue; 2926 } 2927 2928 VM_OBJECT_WLOCK(obj); 2929 if ((obj->flags & OBJ_SWAP) == 0) { 2930 VM_OBJECT_WUNLOCK(obj); 2931 continue; 2932 } 2933 2934 /* 2935 * Charge for the whole object allocation now, since 2936 * we cannot distinguish between non-charged and 2937 * charged clipped mapping of the same object later. 2938 */ 2939 KASSERT(obj->charge == 0, 2940 ("vm_map_protect: object %p overcharged (entry %p)", 2941 obj, entry)); 2942 if (!swap_reserve(ptoa(obj->size))) { 2943 VM_OBJECT_WUNLOCK(obj); 2944 rv = KERN_RESOURCE_SHORTAGE; 2945 end = entry->end; 2946 break; 2947 } 2948 2949 crhold(cred); 2950 obj->cred = cred; 2951 obj->charge = ptoa(obj->size); 2952 VM_OBJECT_WUNLOCK(obj); 2953 } 2954 2955 /* 2956 * If enough swap space was available, go back and fix up protections. 2957 * Otherwise, just simplify entries, since some may have been modified. 2958 * [Note that clipping is not necessary the second time.] 2959 */ 2960 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry; 2961 entry->start < end; 2962 vm_map_try_merge_entries(map, prev_entry, entry), 2963 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 2964 if (rv != KERN_SUCCESS) 2965 continue; 2966 2967 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 2968 vm_map_protect_guard(entry, new_prot, new_maxprot, 2969 flags); 2970 continue; 2971 } 2972 2973 old_prot = entry->protection; 2974 2975 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) { 2976 entry->max_protection = new_maxprot; 2977 entry->protection = new_maxprot & old_prot; 2978 } 2979 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) 2980 entry->protection = new_prot; 2981 2982 /* 2983 * For user wired map entries, the normal lazy evaluation of 2984 * write access upgrades through soft page faults is 2985 * undesirable. Instead, immediately copy any pages that are 2986 * copy-on-write and enable write access in the physical map. 2987 */ 2988 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2989 (entry->protection & VM_PROT_WRITE) != 0 && 2990 (old_prot & VM_PROT_WRITE) == 0) 2991 vm_fault_copy_entry(map, map, entry, entry, NULL); 2992 2993 /* 2994 * When restricting access, update the physical map. Worry 2995 * about copy-on-write here. 2996 */ 2997 if ((old_prot & ~entry->protection) != 0) { 2998 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 2999 VM_PROT_ALL) 3000 pmap_protect(map->pmap, entry->start, 3001 entry->end, 3002 entry->protection & MASK(entry)); 3003 #undef MASK 3004 } 3005 } 3006 vm_map_try_merge_entries(map, prev_entry, entry); 3007 vm_map_unlock(map); 3008 return (rv); 3009 } 3010 3011 /* 3012 * vm_map_madvise: 3013 * 3014 * This routine traverses a processes map handling the madvise 3015 * system call. Advisories are classified as either those effecting 3016 * the vm_map_entry structure, or those effecting the underlying 3017 * objects. 3018 */ 3019 int 3020 vm_map_madvise( 3021 vm_map_t map, 3022 vm_offset_t start, 3023 vm_offset_t end, 3024 int behav) 3025 { 3026 vm_map_entry_t entry, prev_entry; 3027 int rv; 3028 bool modify_map; 3029 3030 /* 3031 * Some madvise calls directly modify the vm_map_entry, in which case 3032 * we need to use an exclusive lock on the map and we need to perform 3033 * various clipping operations. Otherwise we only need a read-lock 3034 * on the map. 3035 */ 3036 switch(behav) { 3037 case MADV_NORMAL: 3038 case MADV_SEQUENTIAL: 3039 case MADV_RANDOM: 3040 case MADV_NOSYNC: 3041 case MADV_AUTOSYNC: 3042 case MADV_NOCORE: 3043 case MADV_CORE: 3044 if (start == end) 3045 return (0); 3046 modify_map = true; 3047 vm_map_lock(map); 3048 break; 3049 case MADV_WILLNEED: 3050 case MADV_DONTNEED: 3051 case MADV_FREE: 3052 if (start == end) 3053 return (0); 3054 modify_map = false; 3055 vm_map_lock_read(map); 3056 break; 3057 default: 3058 return (EINVAL); 3059 } 3060 3061 /* 3062 * Locate starting entry and clip if necessary. 3063 */ 3064 VM_MAP_RANGE_CHECK(map, start, end); 3065 3066 if (modify_map) { 3067 /* 3068 * madvise behaviors that are implemented in the vm_map_entry. 3069 * 3070 * We clip the vm_map_entry so that behavioral changes are 3071 * limited to the specified address range. 3072 */ 3073 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry); 3074 if (rv != KERN_SUCCESS) { 3075 vm_map_unlock(map); 3076 return (vm_mmap_to_errno(rv)); 3077 } 3078 3079 for (; entry->start < end; prev_entry = entry, 3080 entry = vm_map_entry_succ(entry)) { 3081 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 3082 continue; 3083 3084 rv = vm_map_clip_end(map, entry, end); 3085 if (rv != KERN_SUCCESS) { 3086 vm_map_unlock(map); 3087 return (vm_mmap_to_errno(rv)); 3088 } 3089 3090 switch (behav) { 3091 case MADV_NORMAL: 3092 vm_map_entry_set_behavior(entry, 3093 MAP_ENTRY_BEHAV_NORMAL); 3094 break; 3095 case MADV_SEQUENTIAL: 3096 vm_map_entry_set_behavior(entry, 3097 MAP_ENTRY_BEHAV_SEQUENTIAL); 3098 break; 3099 case MADV_RANDOM: 3100 vm_map_entry_set_behavior(entry, 3101 MAP_ENTRY_BEHAV_RANDOM); 3102 break; 3103 case MADV_NOSYNC: 3104 entry->eflags |= MAP_ENTRY_NOSYNC; 3105 break; 3106 case MADV_AUTOSYNC: 3107 entry->eflags &= ~MAP_ENTRY_NOSYNC; 3108 break; 3109 case MADV_NOCORE: 3110 entry->eflags |= MAP_ENTRY_NOCOREDUMP; 3111 break; 3112 case MADV_CORE: 3113 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; 3114 break; 3115 default: 3116 break; 3117 } 3118 vm_map_try_merge_entries(map, prev_entry, entry); 3119 } 3120 vm_map_try_merge_entries(map, prev_entry, entry); 3121 vm_map_unlock(map); 3122 } else { 3123 vm_pindex_t pstart, pend; 3124 3125 /* 3126 * madvise behaviors that are implemented in the underlying 3127 * vm_object. 3128 * 3129 * Since we don't clip the vm_map_entry, we have to clip 3130 * the vm_object pindex and count. 
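 * For example, when an entry only partly overlaps [start, end),
 * pstart is advanced by the number of the entry's pages that precede
 * start and pend is reduced by the number of pages beyond end, so
 * that only the overlapping pages are advised.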
3131 */ 3132 if (!vm_map_lookup_entry(map, start, &entry)) 3133 entry = vm_map_entry_succ(entry); 3134 for (; entry->start < end; 3135 entry = vm_map_entry_succ(entry)) { 3136 vm_offset_t useEnd, useStart; 3137 3138 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | 3139 MAP_ENTRY_GUARD)) != 0) 3140 continue; 3141 3142 /* 3143 * MADV_FREE would otherwise rewind time to 3144 * the creation of the shadow object. Because 3145 * we hold the VM map read-locked, neither the 3146 * entry's object nor the presence of a 3147 * backing object can change. 3148 */ 3149 if (behav == MADV_FREE && 3150 entry->object.vm_object != NULL && 3151 entry->object.vm_object->backing_object != NULL) 3152 continue; 3153 3154 pstart = OFF_TO_IDX(entry->offset); 3155 pend = pstart + atop(entry->end - entry->start); 3156 useStart = entry->start; 3157 useEnd = entry->end; 3158 3159 if (entry->start < start) { 3160 pstart += atop(start - entry->start); 3161 useStart = start; 3162 } 3163 if (entry->end > end) { 3164 pend -= atop(entry->end - end); 3165 useEnd = end; 3166 } 3167 3168 if (pstart >= pend) 3169 continue; 3170 3171 /* 3172 * Perform the pmap_advise() before clearing 3173 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 3174 * concurrent pmap operation, such as pmap_remove(), 3175 * could clear a reference in the pmap and set 3176 * PGA_REFERENCED on the page before the pmap_advise() 3177 * had completed. Consequently, the page would appear 3178 * referenced based upon an old reference that 3179 * occurred before this pmap_advise() ran. 3180 */ 3181 if (behav == MADV_DONTNEED || behav == MADV_FREE) 3182 pmap_advise(map->pmap, useStart, useEnd, 3183 behav); 3184 3185 vm_object_madvise(entry->object.vm_object, pstart, 3186 pend, behav); 3187 3188 /* 3189 * Pre-populate paging structures in the 3190 * WILLNEED case. For wired entries, the 3191 * paging structures are already populated. 3192 */ 3193 if (behav == MADV_WILLNEED && 3194 entry->wired_count == 0) { 3195 vm_map_pmap_enter(map, 3196 useStart, 3197 entry->protection, 3198 entry->object.vm_object, 3199 pstart, 3200 ptoa(pend - pstart), 3201 MAP_PREFAULT_MADVISE 3202 ); 3203 } 3204 } 3205 vm_map_unlock_read(map); 3206 } 3207 return (0); 3208 } 3209 3210 /* 3211 * vm_map_inherit: 3212 * 3213 * Sets the inheritance of the specified address 3214 * range in the target map. Inheritance 3215 * affects how the map will be shared with 3216 * child maps at the time of vmspace_fork. 
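 * Roughly: VM_INHERIT_SHARE shares the range with the child,
 * VM_INHERIT_COPY gives the child a copy-on-write copy,
 * VM_INHERIT_NONE leaves the range unmapped in the child, and
 * VM_INHERIT_ZERO gives the child a fresh, zero-filled anonymous
 * mapping of the same size.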
3217 */ 3218 int 3219 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 3220 vm_inherit_t new_inheritance) 3221 { 3222 vm_map_entry_t entry, lentry, prev_entry, start_entry; 3223 int rv; 3224 3225 switch (new_inheritance) { 3226 case VM_INHERIT_NONE: 3227 case VM_INHERIT_COPY: 3228 case VM_INHERIT_SHARE: 3229 case VM_INHERIT_ZERO: 3230 break; 3231 default: 3232 return (KERN_INVALID_ARGUMENT); 3233 } 3234 if (start == end) 3235 return (KERN_SUCCESS); 3236 vm_map_lock(map); 3237 VM_MAP_RANGE_CHECK(map, start, end); 3238 rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry); 3239 if (rv != KERN_SUCCESS) 3240 goto unlock; 3241 if (vm_map_lookup_entry(map, end - 1, &lentry)) { 3242 rv = vm_map_clip_end(map, lentry, end); 3243 if (rv != KERN_SUCCESS) 3244 goto unlock; 3245 } 3246 if (new_inheritance == VM_INHERIT_COPY) { 3247 for (entry = start_entry; entry->start < end; 3248 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3249 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) 3250 != 0) { 3251 rv = KERN_INVALID_ARGUMENT; 3252 goto unlock; 3253 } 3254 } 3255 } 3256 for (entry = start_entry; entry->start < end; prev_entry = entry, 3257 entry = vm_map_entry_succ(entry)) { 3258 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", 3259 entry, (uintmax_t)entry->end, (uintmax_t)end)); 3260 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 3261 new_inheritance != VM_INHERIT_ZERO) 3262 entry->inheritance = new_inheritance; 3263 vm_map_try_merge_entries(map, prev_entry, entry); 3264 } 3265 vm_map_try_merge_entries(map, prev_entry, entry); 3266 unlock: 3267 vm_map_unlock(map); 3268 return (rv); 3269 } 3270 3271 /* 3272 * vm_map_entry_in_transition: 3273 * 3274 * Release the map lock, and sleep until the entry is no longer in 3275 * transition. Awake and acquire the map lock. If the map changed while 3276 * another held the lock, lookup a possibly-changed entry at or after the 3277 * 'start' position of the old entry. 3278 */ 3279 static vm_map_entry_t 3280 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start, 3281 vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry) 3282 { 3283 vm_map_entry_t entry; 3284 vm_offset_t start; 3285 u_int last_timestamp; 3286 3287 VM_MAP_ASSERT_LOCKED(map); 3288 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3289 ("not in-tranition map entry %p", in_entry)); 3290 /* 3291 * We have not yet clipped the entry. 3292 */ 3293 start = MAX(in_start, in_entry->start); 3294 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3295 last_timestamp = map->timestamp; 3296 if (vm_map_unlock_and_wait(map, 0)) { 3297 /* 3298 * Allow interruption of user wiring/unwiring? 3299 */ 3300 } 3301 vm_map_lock(map); 3302 if (last_timestamp + 1 == map->timestamp) 3303 return (in_entry); 3304 3305 /* 3306 * Look again for the entry because the map was modified while it was 3307 * unlocked. Specifically, the entry may have been clipped, merged, or 3308 * deleted. 3309 */ 3310 if (!vm_map_lookup_entry(map, start, &entry)) { 3311 if (!holes_ok) { 3312 *io_end = start; 3313 return (NULL); 3314 } 3315 entry = vm_map_entry_succ(entry); 3316 } 3317 return (entry); 3318 } 3319 3320 /* 3321 * vm_map_unwire: 3322 * 3323 * Implements both kernel and user unwiring. 
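 * Roughly, the range is processed in two passes: the first pass clips
 * the boundary entries and marks each entry MAP_ENTRY_IN_TRANSITION,
 * waiting out entries that are already in transition, and the second
 * pass performs the actual unwiring, clears the transition marks,
 * wakes any waiters, and re-merges neighboring entries.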
3324 */ 3325 int 3326 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 3327 int flags) 3328 { 3329 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3330 int rv; 3331 bool holes_ok, need_wakeup, user_unwire; 3332 3333 if (start == end) 3334 return (KERN_SUCCESS); 3335 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3336 user_unwire = (flags & VM_MAP_WIRE_USER) != 0; 3337 vm_map_lock(map); 3338 VM_MAP_RANGE_CHECK(map, start, end); 3339 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3340 if (holes_ok) 3341 first_entry = vm_map_entry_succ(first_entry); 3342 else { 3343 vm_map_unlock(map); 3344 return (KERN_INVALID_ADDRESS); 3345 } 3346 } 3347 rv = KERN_SUCCESS; 3348 for (entry = first_entry; entry->start < end; entry = next_entry) { 3349 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3350 /* 3351 * We have not yet clipped the entry. 3352 */ 3353 next_entry = vm_map_entry_in_transition(map, start, 3354 &end, holes_ok, entry); 3355 if (next_entry == NULL) { 3356 if (entry == first_entry) { 3357 vm_map_unlock(map); 3358 return (KERN_INVALID_ADDRESS); 3359 } 3360 rv = KERN_INVALID_ADDRESS; 3361 break; 3362 } 3363 first_entry = (entry == first_entry) ? 3364 next_entry : NULL; 3365 continue; 3366 } 3367 rv = vm_map_clip_start(map, entry, start); 3368 if (rv != KERN_SUCCESS) 3369 break; 3370 rv = vm_map_clip_end(map, entry, end); 3371 if (rv != KERN_SUCCESS) 3372 break; 3373 3374 /* 3375 * Mark the entry in case the map lock is released. (See 3376 * above.) 3377 */ 3378 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3379 entry->wiring_thread == NULL, 3380 ("owned map entry %p", entry)); 3381 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3382 entry->wiring_thread = curthread; 3383 next_entry = vm_map_entry_succ(entry); 3384 /* 3385 * Check the map for holes in the specified region. 3386 * If holes_ok, skip this check. 3387 */ 3388 if (!holes_ok && 3389 entry->end < end && next_entry->start > entry->end) { 3390 end = entry->end; 3391 rv = KERN_INVALID_ADDRESS; 3392 break; 3393 } 3394 /* 3395 * If system unwiring, require that the entry is system wired. 3396 */ 3397 if (!user_unwire && 3398 vm_map_entry_system_wired_count(entry) == 0) { 3399 end = entry->end; 3400 rv = KERN_INVALID_ARGUMENT; 3401 break; 3402 } 3403 } 3404 need_wakeup = false; 3405 if (first_entry == NULL && 3406 !vm_map_lookup_entry(map, start, &first_entry)) { 3407 KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); 3408 prev_entry = first_entry; 3409 entry = vm_map_entry_succ(first_entry); 3410 } else { 3411 prev_entry = vm_map_entry_pred(first_entry); 3412 entry = first_entry; 3413 } 3414 for (; entry->start < end; 3415 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3416 /* 3417 * If holes_ok was specified, an empty 3418 * space in the unwired region could have been mapped 3419 * while the map lock was dropped for draining 3420 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 3421 * could be simultaneously wiring this new mapping 3422 * entry. Detect these cases and skip any entries 3423 * marked as in transition by us. 
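 * (More precisely, the test below skips any entry that is not marked
 * MAP_ENTRY_IN_TRANSITION, or whose wiring_thread is not curthread.)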
3424 */ 3425 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3426 entry->wiring_thread != curthread) { 3427 KASSERT(holes_ok, 3428 ("vm_map_unwire: !HOLESOK and new/changed entry")); 3429 continue; 3430 } 3431 3432 if (rv == KERN_SUCCESS && (!user_unwire || 3433 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 3434 if (entry->wired_count == 1) 3435 vm_map_entry_unwire(map, entry); 3436 else 3437 entry->wired_count--; 3438 if (user_unwire) 3439 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3440 } 3441 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3442 ("vm_map_unwire: in-transition flag missing %p", entry)); 3443 KASSERT(entry->wiring_thread == curthread, 3444 ("vm_map_unwire: alien wire %p", entry)); 3445 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 3446 entry->wiring_thread = NULL; 3447 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3448 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3449 need_wakeup = true; 3450 } 3451 vm_map_try_merge_entries(map, prev_entry, entry); 3452 } 3453 vm_map_try_merge_entries(map, prev_entry, entry); 3454 vm_map_unlock(map); 3455 if (need_wakeup) 3456 vm_map_wakeup(map); 3457 return (rv); 3458 } 3459 3460 static void 3461 vm_map_wire_user_count_sub(u_long npages) 3462 { 3463 3464 atomic_subtract_long(&vm_user_wire_count, npages); 3465 } 3466 3467 static bool 3468 vm_map_wire_user_count_add(u_long npages) 3469 { 3470 u_long wired; 3471 3472 wired = vm_user_wire_count; 3473 do { 3474 if (npages + wired > vm_page_max_user_wired) 3475 return (false); 3476 } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired, 3477 npages + wired)); 3478 3479 return (true); 3480 } 3481 3482 /* 3483 * vm_map_wire_entry_failure: 3484 * 3485 * Handle a wiring failure on the given entry. 3486 * 3487 * The map should be locked. 3488 */ 3489 static void 3490 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 3491 vm_offset_t failed_addr) 3492 { 3493 3494 VM_MAP_ASSERT_LOCKED(map); 3495 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 3496 entry->wired_count == 1, 3497 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 3498 KASSERT(failed_addr < entry->end, 3499 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 3500 3501 /* 3502 * If any pages at the start of this entry were successfully wired, 3503 * then unwire them. 3504 */ 3505 if (failed_addr > entry->start) { 3506 pmap_unwire(map->pmap, entry->start, failed_addr); 3507 vm_object_unwire(entry->object.vm_object, entry->offset, 3508 failed_addr - entry->start, PQ_ACTIVE); 3509 } 3510 3511 /* 3512 * Assign an out-of-range value to represent the failure to wire this 3513 * entry. 3514 */ 3515 entry->wired_count = -1; 3516 } 3517 3518 int 3519 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3520 { 3521 int rv; 3522 3523 vm_map_lock(map); 3524 rv = vm_map_wire_locked(map, start, end, flags); 3525 vm_map_unlock(map); 3526 return (rv); 3527 } 3528 3529 /* 3530 * vm_map_wire_locked: 3531 * 3532 * Implements both kernel and user wiring. Returns with the map locked, 3533 * the map lock may be dropped. 
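 * That is, the lock may be dropped temporarily while pages are
 * faulted in (the entry being wired is marked in-transition and the
 * map is marked busy for that window), but the lock is held again by
 * the time this function returns.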
3534 */ 3535 int 3536 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3537 { 3538 vm_map_entry_t entry, first_entry, next_entry, prev_entry; 3539 vm_offset_t faddr, saved_end, saved_start; 3540 u_long incr, npages; 3541 u_int bidx, last_timestamp; 3542 int rv; 3543 bool holes_ok, need_wakeup, user_wire; 3544 vm_prot_t prot; 3545 3546 VM_MAP_ASSERT_LOCKED(map); 3547 3548 if (start == end) 3549 return (KERN_SUCCESS); 3550 prot = 0; 3551 if (flags & VM_MAP_WIRE_WRITE) 3552 prot |= VM_PROT_WRITE; 3553 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3554 user_wire = (flags & VM_MAP_WIRE_USER) != 0; 3555 VM_MAP_RANGE_CHECK(map, start, end); 3556 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3557 if (holes_ok) 3558 first_entry = vm_map_entry_succ(first_entry); 3559 else 3560 return (KERN_INVALID_ADDRESS); 3561 } 3562 for (entry = first_entry; entry->start < end; entry = next_entry) { 3563 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3564 /* 3565 * We have not yet clipped the entry. 3566 */ 3567 next_entry = vm_map_entry_in_transition(map, start, 3568 &end, holes_ok, entry); 3569 if (next_entry == NULL) { 3570 if (entry == first_entry) 3571 return (KERN_INVALID_ADDRESS); 3572 rv = KERN_INVALID_ADDRESS; 3573 goto done; 3574 } 3575 first_entry = (entry == first_entry) ? 3576 next_entry : NULL; 3577 continue; 3578 } 3579 rv = vm_map_clip_start(map, entry, start); 3580 if (rv != KERN_SUCCESS) 3581 goto done; 3582 rv = vm_map_clip_end(map, entry, end); 3583 if (rv != KERN_SUCCESS) 3584 goto done; 3585 3586 /* 3587 * Mark the entry in case the map lock is released. (See 3588 * above.) 3589 */ 3590 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3591 entry->wiring_thread == NULL, 3592 ("owned map entry %p", entry)); 3593 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3594 entry->wiring_thread = curthread; 3595 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3596 || (entry->protection & prot) != prot) { 3597 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3598 if (!holes_ok) { 3599 end = entry->end; 3600 rv = KERN_INVALID_ADDRESS; 3601 goto done; 3602 } 3603 } else if (entry->wired_count == 0) { 3604 entry->wired_count++; 3605 3606 npages = atop(entry->end - entry->start); 3607 if (user_wire && !vm_map_wire_user_count_add(npages)) { 3608 vm_map_wire_entry_failure(map, entry, 3609 entry->start); 3610 end = entry->end; 3611 rv = KERN_RESOURCE_SHORTAGE; 3612 goto done; 3613 } 3614 3615 /* 3616 * Release the map lock, relying on the in-transition 3617 * mark. Mark the map busy for fork. 3618 */ 3619 saved_start = entry->start; 3620 saved_end = entry->end; 3621 last_timestamp = map->timestamp; 3622 bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3623 incr = pagesizes[bidx]; 3624 vm_map_busy(map); 3625 vm_map_unlock(map); 3626 3627 for (faddr = saved_start; faddr < saved_end; 3628 faddr += incr) { 3629 /* 3630 * Simulate a fault to get the page and enter 3631 * it into the physical map. 3632 */ 3633 rv = vm_fault(map, faddr, VM_PROT_NONE, 3634 VM_FAULT_WIRE, NULL); 3635 if (rv != KERN_SUCCESS) 3636 break; 3637 } 3638 vm_map_lock(map); 3639 vm_map_unbusy(map); 3640 if (last_timestamp + 1 != map->timestamp) { 3641 /* 3642 * Look again for the entry because the map was 3643 * modified while it was unlocked. The entry 3644 * may have been clipped, but NOT merged or 3645 * deleted. 3646 */ 3647 if (!vm_map_lookup_entry(map, saved_start, 3648 &next_entry)) 3649 KASSERT(false, 3650 ("vm_map_wire: lookup failed")); 3651 first_entry = (entry == first_entry) ? 
3652 next_entry : NULL; 3653 for (entry = next_entry; entry->end < saved_end; 3654 entry = vm_map_entry_succ(entry)) { 3655 /* 3656 * In case of failure, handle entries 3657 * that were not fully wired here; 3658 * fully wired entries are handled 3659 * later. 3660 */ 3661 if (rv != KERN_SUCCESS && 3662 faddr < entry->end) 3663 vm_map_wire_entry_failure(map, 3664 entry, faddr); 3665 } 3666 } 3667 if (rv != KERN_SUCCESS) { 3668 vm_map_wire_entry_failure(map, entry, faddr); 3669 if (user_wire) 3670 vm_map_wire_user_count_sub(npages); 3671 end = entry->end; 3672 goto done; 3673 } 3674 } else if (!user_wire || 3675 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3676 entry->wired_count++; 3677 } 3678 /* 3679 * Check the map for holes in the specified region. 3680 * If holes_ok was specified, skip this check. 3681 */ 3682 next_entry = vm_map_entry_succ(entry); 3683 if (!holes_ok && 3684 entry->end < end && next_entry->start > entry->end) { 3685 end = entry->end; 3686 rv = KERN_INVALID_ADDRESS; 3687 goto done; 3688 } 3689 } 3690 rv = KERN_SUCCESS; 3691 done: 3692 need_wakeup = false; 3693 if (first_entry == NULL && 3694 !vm_map_lookup_entry(map, start, &first_entry)) { 3695 KASSERT(holes_ok, ("vm_map_wire: lookup failed")); 3696 prev_entry = first_entry; 3697 entry = vm_map_entry_succ(first_entry); 3698 } else { 3699 prev_entry = vm_map_entry_pred(first_entry); 3700 entry = first_entry; 3701 } 3702 for (; entry->start < end; 3703 prev_entry = entry, entry = vm_map_entry_succ(entry)) { 3704 /* 3705 * If holes_ok was specified, an empty 3706 * space in the unwired region could have been mapped 3707 * while the map lock was dropped for faulting in the 3708 * pages or draining MAP_ENTRY_IN_TRANSITION. 3709 * Moreover, another thread could be simultaneously 3710 * wiring this new mapping entry. Detect these cases 3711 * and skip any entries marked as in transition not by us. 3712 * 3713 * Another way to get an entry not marked with 3714 * MAP_ENTRY_IN_TRANSITION is after failed clipping, 3715 * which set rv to KERN_INVALID_ARGUMENT. 3716 */ 3717 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3718 entry->wiring_thread != curthread) { 3719 KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT, 3720 ("vm_map_wire: !HOLESOK and new/changed entry")); 3721 continue; 3722 } 3723 3724 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { 3725 /* do nothing */ 3726 } else if (rv == KERN_SUCCESS) { 3727 if (user_wire) 3728 entry->eflags |= MAP_ENTRY_USER_WIRED; 3729 } else if (entry->wired_count == -1) { 3730 /* 3731 * Wiring failed on this entry. Thus, unwiring is 3732 * unnecessary. 3733 */ 3734 entry->wired_count = 0; 3735 } else if (!user_wire || 3736 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3737 /* 3738 * Undo the wiring. Wiring succeeded on this entry 3739 * but failed on a later entry. 
3740 */ 3741 if (entry->wired_count == 1) { 3742 vm_map_entry_unwire(map, entry); 3743 if (user_wire) 3744 vm_map_wire_user_count_sub( 3745 atop(entry->end - entry->start)); 3746 } else 3747 entry->wired_count--; 3748 } 3749 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3750 ("vm_map_wire: in-transition flag missing %p", entry)); 3751 KASSERT(entry->wiring_thread == curthread, 3752 ("vm_map_wire: alien wire %p", entry)); 3753 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3754 MAP_ENTRY_WIRE_SKIPPED); 3755 entry->wiring_thread = NULL; 3756 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3757 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3758 need_wakeup = true; 3759 } 3760 vm_map_try_merge_entries(map, prev_entry, entry); 3761 } 3762 vm_map_try_merge_entries(map, prev_entry, entry); 3763 if (need_wakeup) 3764 vm_map_wakeup(map); 3765 return (rv); 3766 } 3767 3768 /* 3769 * vm_map_sync 3770 * 3771 * Push any dirty cached pages in the address range to their pager. 3772 * If syncio is TRUE, dirty pages are written synchronously. 3773 * If invalidate is TRUE, any cached pages are freed as well. 3774 * 3775 * If the size of the region from start to end is zero, we are 3776 * supposed to flush all modified pages within the region containing 3777 * start. Unfortunately, a region can be split or coalesced with 3778 * neighboring regions, making it difficult to determine what the 3779 * original region was. Therefore, we approximate this requirement by 3780 * flushing the current region containing start. 3781 * 3782 * Returns an error if any part of the specified range is not mapped. 3783 */ 3784 int 3785 vm_map_sync( 3786 vm_map_t map, 3787 vm_offset_t start, 3788 vm_offset_t end, 3789 boolean_t syncio, 3790 boolean_t invalidate) 3791 { 3792 vm_map_entry_t entry, first_entry, next_entry; 3793 vm_size_t size; 3794 vm_object_t object; 3795 vm_ooffset_t offset; 3796 unsigned int last_timestamp; 3797 int bdry_idx; 3798 boolean_t failed; 3799 3800 vm_map_lock_read(map); 3801 VM_MAP_RANGE_CHECK(map, start, end); 3802 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3803 vm_map_unlock_read(map); 3804 return (KERN_INVALID_ADDRESS); 3805 } else if (start == end) { 3806 start = first_entry->start; 3807 end = first_entry->end; 3808 } 3809 3810 /* 3811 * Make a first pass to check for user-wired memory, holes, 3812 * and partial invalidation of largepage mappings. 3813 */ 3814 for (entry = first_entry; entry->start < end; entry = next_entry) { 3815 if (invalidate) { 3816 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { 3817 vm_map_unlock_read(map); 3818 return (KERN_INVALID_ARGUMENT); 3819 } 3820 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); 3821 if (bdry_idx != 0 && 3822 ((start & (pagesizes[bdry_idx] - 1)) != 0 || 3823 (end & (pagesizes[bdry_idx] - 1)) != 0)) { 3824 vm_map_unlock_read(map); 3825 return (KERN_INVALID_ARGUMENT); 3826 } 3827 } 3828 next_entry = vm_map_entry_succ(entry); 3829 if (end > entry->end && 3830 entry->end != next_entry->start) { 3831 vm_map_unlock_read(map); 3832 return (KERN_INVALID_ADDRESS); 3833 } 3834 } 3835 3836 if (invalidate) 3837 pmap_remove(map->pmap, start, end); 3838 failed = FALSE; 3839 3840 /* 3841 * Make a second pass, cleaning/uncaching pages from the indicated 3842 * objects as we go. 3843 */ 3844 for (entry = first_entry; entry->start < end;) { 3845 offset = entry->offset + (start - entry->start); 3846 size = (end <= entry->end ? 
end : entry->end) - start; 3847 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { 3848 vm_map_t smap; 3849 vm_map_entry_t tentry; 3850 vm_size_t tsize; 3851 3852 smap = entry->object.sub_map; 3853 vm_map_lock_read(smap); 3854 (void) vm_map_lookup_entry(smap, offset, &tentry); 3855 tsize = tentry->end - offset; 3856 if (tsize < size) 3857 size = tsize; 3858 object = tentry->object.vm_object; 3859 offset = tentry->offset + (offset - tentry->start); 3860 vm_map_unlock_read(smap); 3861 } else { 3862 object = entry->object.vm_object; 3863 } 3864 vm_object_reference(object); 3865 last_timestamp = map->timestamp; 3866 vm_map_unlock_read(map); 3867 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 3868 failed = TRUE; 3869 start += size; 3870 vm_object_deallocate(object); 3871 vm_map_lock_read(map); 3872 if (last_timestamp == map->timestamp || 3873 !vm_map_lookup_entry(map, start, &entry)) 3874 entry = vm_map_entry_succ(entry); 3875 } 3876 3877 vm_map_unlock_read(map); 3878 return (failed ? KERN_FAILURE : KERN_SUCCESS); 3879 } 3880 3881 /* 3882 * vm_map_entry_unwire: [ internal use only ] 3883 * 3884 * Make the region specified by this entry pageable. 3885 * 3886 * The map in question should be locked. 3887 * [This is the reason for this routine's existence.] 3888 */ 3889 static void 3890 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3891 { 3892 vm_size_t size; 3893 3894 VM_MAP_ASSERT_LOCKED(map); 3895 KASSERT(entry->wired_count > 0, 3896 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 3897 3898 size = entry->end - entry->start; 3899 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) 3900 vm_map_wire_user_count_sub(atop(size)); 3901 pmap_unwire(map->pmap, entry->start, entry->end); 3902 vm_object_unwire(entry->object.vm_object, entry->offset, size, 3903 PQ_ACTIVE); 3904 entry->wired_count = 0; 3905 } 3906 3907 static void 3908 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 3909 { 3910 3911 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3912 vm_object_deallocate(entry->object.vm_object); 3913 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3914 } 3915 3916 /* 3917 * vm_map_entry_delete: [ internal use only ] 3918 * 3919 * Deallocate the given entry from the target map. 
3920 */ 3921 static void 3922 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3923 { 3924 vm_object_t object; 3925 vm_pindex_t offidxstart, offidxend, size1; 3926 vm_size_t size; 3927 3928 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3929 object = entry->object.vm_object; 3930 3931 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3932 MPASS(entry->cred == NULL); 3933 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3934 MPASS(object == NULL); 3935 vm_map_entry_deallocate(entry, map->system_map); 3936 return; 3937 } 3938 3939 size = entry->end - entry->start; 3940 map->size -= size; 3941 3942 if (entry->cred != NULL) { 3943 swap_release_by_cred(size, entry->cred); 3944 crfree(entry->cred); 3945 } 3946 3947 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { 3948 entry->object.vm_object = NULL; 3949 } else if ((object->flags & OBJ_ANON) != 0 || 3950 object == kernel_object) { 3951 KASSERT(entry->cred == NULL || object->cred == NULL || 3952 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3953 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3954 offidxstart = OFF_TO_IDX(entry->offset); 3955 offidxend = offidxstart + atop(size); 3956 VM_OBJECT_WLOCK(object); 3957 if (object->ref_count != 1 && 3958 ((object->flags & OBJ_ONEMAPPING) != 0 || 3959 object == kernel_object)) { 3960 vm_object_collapse(object); 3961 3962 /* 3963 * The option OBJPR_NOTMAPPED can be passed here 3964 * because vm_map_delete() already performed 3965 * pmap_remove() on the only mapping to this range 3966 * of pages. 3967 */ 3968 vm_object_page_remove(object, offidxstart, offidxend, 3969 OBJPR_NOTMAPPED); 3970 if (offidxend >= object->size && 3971 offidxstart < object->size) { 3972 size1 = object->size; 3973 object->size = offidxstart; 3974 if (object->cred != NULL) { 3975 size1 -= object->size; 3976 KASSERT(object->charge >= ptoa(size1), 3977 ("object %p charge < 0", object)); 3978 swap_release_by_cred(ptoa(size1), 3979 object->cred); 3980 object->charge -= ptoa(size1); 3981 } 3982 } 3983 } 3984 VM_OBJECT_WUNLOCK(object); 3985 } 3986 if (map->system_map) 3987 vm_map_entry_deallocate(entry, TRUE); 3988 else { 3989 entry->defer_next = curthread->td_map_def_user; 3990 curthread->td_map_def_user = entry; 3991 } 3992 } 3993 3994 /* 3995 * vm_map_delete: [ internal use only ] 3996 * 3997 * Deallocates the given address range from the target 3998 * map. 3999 */ 4000 int 4001 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 4002 { 4003 vm_map_entry_t entry, next_entry, scratch_entry; 4004 int rv; 4005 4006 VM_MAP_ASSERT_LOCKED(map); 4007 4008 if (start == end) 4009 return (KERN_SUCCESS); 4010 4011 /* 4012 * Find the start of the region, and clip it. 4013 * Step through all entries in this region. 4014 */ 4015 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry); 4016 if (rv != KERN_SUCCESS) 4017 return (rv); 4018 for (; entry->start < end; entry = next_entry) { 4019 /* 4020 * Wait for wiring or unwiring of an entry to complete. 4021 * Also wait for any system wirings to disappear on 4022 * user maps. 
4023 */ 4024 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 4025 (vm_map_pmap(map) != kernel_pmap && 4026 vm_map_entry_system_wired_count(entry) != 0)) { 4027 unsigned int last_timestamp; 4028 vm_offset_t saved_start; 4029 4030 saved_start = entry->start; 4031 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 4032 last_timestamp = map->timestamp; 4033 (void) vm_map_unlock_and_wait(map, 0); 4034 vm_map_lock(map); 4035 if (last_timestamp + 1 != map->timestamp) { 4036 /* 4037 * Look again for the entry because the map was 4038 * modified while it was unlocked. 4039 * Specifically, the entry may have been 4040 * clipped, merged, or deleted. 4041 */ 4042 rv = vm_map_lookup_clip_start(map, saved_start, 4043 &next_entry, &scratch_entry); 4044 if (rv != KERN_SUCCESS) 4045 break; 4046 } else 4047 next_entry = entry; 4048 continue; 4049 } 4050 4051 /* XXXKIB or delete to the upper superpage boundary ? */ 4052 rv = vm_map_clip_end(map, entry, end); 4053 if (rv != KERN_SUCCESS) 4054 break; 4055 next_entry = vm_map_entry_succ(entry); 4056 4057 /* 4058 * Unwire before removing addresses from the pmap; otherwise, 4059 * unwiring will put the entries back in the pmap. 4060 */ 4061 if (entry->wired_count != 0) 4062 vm_map_entry_unwire(map, entry); 4063 4064 /* 4065 * Remove mappings for the pages, but only if the 4066 * mappings could exist. For instance, it does not 4067 * make sense to call pmap_remove() for guard entries. 4068 */ 4069 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 4070 entry->object.vm_object != NULL) 4071 pmap_map_delete(map->pmap, entry->start, entry->end); 4072 4073 /* 4074 * Delete the entry only after removing all pmap 4075 * entries pointing to its pages. (Otherwise, its 4076 * page frames may be reallocated, and any modify bits 4077 * will be set in the wrong object!) 4078 */ 4079 vm_map_entry_delete(map, entry); 4080 } 4081 return (rv); 4082 } 4083 4084 /* 4085 * vm_map_remove: 4086 * 4087 * Remove the given address range from the target map. 4088 * This is the exported form of vm_map_delete. 4089 */ 4090 int 4091 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 4092 { 4093 int result; 4094 4095 vm_map_lock(map); 4096 VM_MAP_RANGE_CHECK(map, start, end); 4097 result = vm_map_delete(map, start, end); 4098 vm_map_unlock(map); 4099 return (result); 4100 } 4101 4102 /* 4103 * vm_map_check_protection: 4104 * 4105 * Assert that the target map allows the specified privilege on the 4106 * entire address region given. The entire region must be allocated. 4107 * 4108 * WARNING! This code does not and should not check whether the 4109 * contents of the region is accessible. For example a smaller file 4110 * might be mapped into a larger address space. 4111 * 4112 * NOTE! This code is also called by munmap(). 4113 * 4114 * The map must be locked. A read lock is sufficient. 4115 */ 4116 boolean_t 4117 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 4118 vm_prot_t protection) 4119 { 4120 vm_map_entry_t entry; 4121 vm_map_entry_t tmp_entry; 4122 4123 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 4124 return (FALSE); 4125 entry = tmp_entry; 4126 4127 while (start < end) { 4128 /* 4129 * No holes allowed! 4130 */ 4131 if (start < entry->start) 4132 return (FALSE); 4133 /* 4134 * Check protection associated with entry. 
4135 */ 4136 if ((entry->protection & protection) != protection) 4137 return (FALSE); 4138 /* go to next entry */ 4139 start = entry->end; 4140 entry = vm_map_entry_succ(entry); 4141 } 4142 return (TRUE); 4143 } 4144 4145 /* 4146 * 4147 * vm_map_copy_swap_object: 4148 * 4149 * Copies a swap-backed object from an existing map entry to a 4150 * new one. Carries forward the swap charge. May change the 4151 * src object on return. 4152 */ 4153 static void 4154 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry, 4155 vm_offset_t size, vm_ooffset_t *fork_charge) 4156 { 4157 vm_object_t src_object; 4158 struct ucred *cred; 4159 int charged; 4160 4161 src_object = src_entry->object.vm_object; 4162 charged = ENTRY_CHARGED(src_entry); 4163 if ((src_object->flags & OBJ_ANON) != 0) { 4164 VM_OBJECT_WLOCK(src_object); 4165 vm_object_collapse(src_object); 4166 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { 4167 vm_object_split(src_entry); 4168 src_object = src_entry->object.vm_object; 4169 } 4170 vm_object_reference_locked(src_object); 4171 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 4172 VM_OBJECT_WUNLOCK(src_object); 4173 } else 4174 vm_object_reference(src_object); 4175 if (src_entry->cred != NULL && 4176 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4177 KASSERT(src_object->cred == NULL, 4178 ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p", 4179 src_object)); 4180 src_object->cred = src_entry->cred; 4181 src_object->charge = size; 4182 } 4183 dst_entry->object.vm_object = src_object; 4184 if (charged) { 4185 cred = curthread->td_ucred; 4186 crhold(cred); 4187 dst_entry->cred = cred; 4188 *fork_charge += size; 4189 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 4190 crhold(cred); 4191 src_entry->cred = cred; 4192 *fork_charge += size; 4193 } 4194 } 4195 } 4196 4197 /* 4198 * vm_map_copy_entry: 4199 * 4200 * Copies the contents of the source entry to the destination 4201 * entry. The entries *must* be aligned properly. 4202 */ 4203 static void 4204 vm_map_copy_entry( 4205 vm_map_t src_map, 4206 vm_map_t dst_map, 4207 vm_map_entry_t src_entry, 4208 vm_map_entry_t dst_entry, 4209 vm_ooffset_t *fork_charge) 4210 { 4211 vm_object_t src_object; 4212 vm_map_entry_t fake_entry; 4213 vm_offset_t size; 4214 4215 VM_MAP_ASSERT_LOCKED(dst_map); 4216 4217 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 4218 return; 4219 4220 if (src_entry->wired_count == 0 || 4221 (src_entry->protection & VM_PROT_WRITE) == 0) { 4222 /* 4223 * If the source entry is marked needs_copy, it is already 4224 * write-protected. 4225 */ 4226 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 4227 (src_entry->protection & VM_PROT_WRITE) != 0) { 4228 pmap_protect(src_map->pmap, 4229 src_entry->start, 4230 src_entry->end, 4231 src_entry->protection & ~VM_PROT_WRITE); 4232 } 4233 4234 /* 4235 * Make a copy of the object. 4236 */ 4237 size = src_entry->end - src_entry->start; 4238 if ((src_object = src_entry->object.vm_object) != NULL) { 4239 if ((src_object->flags & OBJ_SWAP) != 0) { 4240 vm_map_copy_swap_object(src_entry, dst_entry, 4241 size, fork_charge); 4242 /* May have split/collapsed, reload obj. 
*/ 4243 src_object = src_entry->object.vm_object; 4244 } else { 4245 vm_object_reference(src_object); 4246 dst_entry->object.vm_object = src_object; 4247 } 4248 src_entry->eflags |= MAP_ENTRY_COW | 4249 MAP_ENTRY_NEEDS_COPY; 4250 dst_entry->eflags |= MAP_ENTRY_COW | 4251 MAP_ENTRY_NEEDS_COPY; 4252 dst_entry->offset = src_entry->offset; 4253 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { 4254 /* 4255 * MAP_ENTRY_WRITECNT cannot 4256 * indicate write reference from 4257 * src_entry, since the entry is 4258 * marked as needs copy. Allocate a 4259 * fake entry that is used to 4260 * decrement object->un_pager writecount 4261 * at the appropriate time. Attach 4262 * fake_entry to the deferred list. 4263 */ 4264 fake_entry = vm_map_entry_create(dst_map); 4265 fake_entry->eflags = MAP_ENTRY_WRITECNT; 4266 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; 4267 vm_object_reference(src_object); 4268 fake_entry->object.vm_object = src_object; 4269 fake_entry->start = src_entry->start; 4270 fake_entry->end = src_entry->end; 4271 fake_entry->defer_next = 4272 curthread->td_map_def_user; 4273 curthread->td_map_def_user = fake_entry; 4274 } 4275 4276 pmap_copy(dst_map->pmap, src_map->pmap, 4277 dst_entry->start, dst_entry->end - dst_entry->start, 4278 src_entry->start); 4279 } else { 4280 dst_entry->object.vm_object = NULL; 4281 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) 4282 dst_entry->offset = 0; 4283 if (src_entry->cred != NULL) { 4284 dst_entry->cred = curthread->td_ucred; 4285 crhold(dst_entry->cred); 4286 *fork_charge += size; 4287 } 4288 } 4289 } else { 4290 /* 4291 * We don't want to make writeable wired pages copy-on-write. 4292 * Immediately copy these pages into the new map by simulating 4293 * page faults. The new pages are pageable. 4294 */ 4295 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 4296 fork_charge); 4297 } 4298 } 4299 4300 /* 4301 * vmspace_map_entry_forked: 4302 * Update the newly-forked vmspace each time a map entry is inherited 4303 * or copied. The values for vm_dsize and vm_tsize are approximate 4304 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 4305 */ 4306 static void 4307 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 4308 vm_map_entry_t entry) 4309 { 4310 vm_size_t entrysize; 4311 vm_offset_t newend; 4312 4313 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 4314 return; 4315 entrysize = entry->end - entry->start; 4316 vm2->vm_map.size += entrysize; 4317 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { 4318 vm2->vm_ssize += btoc(entrysize); 4319 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 4320 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 4321 newend = MIN(entry->end, 4322 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 4323 vm2->vm_dsize += btoc(newend - entry->start); 4324 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 4325 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 4326 newend = MIN(entry->end, 4327 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 4328 vm2->vm_tsize += btoc(newend - entry->start); 4329 } 4330 } 4331 4332 /* 4333 * vmspace_fork: 4334 * Create a new process vmspace structure and vm_map 4335 * based on those of an existing process. The new map 4336 * is based on the old map, according to the inheritance 4337 * values on the regions in that map. 4338 * 4339 * XXX It might be worth coalescing the entries added to the new vmspace. 4340 * 4341 * The source map must not be locked. 
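 *
 *	A minimal sketch of the usual calling pattern (mirroring
 *	vmspace_unshare() later in this file); "p1" stands for the
 *	forking process and is illustrative only.  The caller collects
 *	the swap charge and must reserve it, freeing the new vmspace
 *	on failure:
 *
 *		fork_charge = 0;
 *		vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
 *		if (vm2 == NULL)
 *			return (ENOMEM);
 *		if (!swap_reserve_by_cred(fork_charge, p1->p_ucred)) {
 *			vmspace_free(vm2);
 *			return (ENOMEM);
 *		}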
4342 */ 4343 struct vmspace * 4344 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 4345 { 4346 struct vmspace *vm2; 4347 vm_map_t new_map, old_map; 4348 vm_map_entry_t new_entry, old_entry; 4349 vm_object_t object; 4350 int error, locked __diagused; 4351 vm_inherit_t inh; 4352 4353 old_map = &vm1->vm_map; 4354 /* Copy immutable fields of vm1 to vm2. */ 4355 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 4356 pmap_pinit); 4357 if (vm2 == NULL) 4358 return (NULL); 4359 4360 vm2->vm_taddr = vm1->vm_taddr; 4361 vm2->vm_daddr = vm1->vm_daddr; 4362 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 4363 vm2->vm_stacktop = vm1->vm_stacktop; 4364 vm2->vm_shp_base = vm1->vm_shp_base; 4365 vm_map_lock(old_map); 4366 if (old_map->busy) 4367 vm_map_wait_busy(old_map); 4368 new_map = &vm2->vm_map; 4369 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 4370 KASSERT(locked, ("vmspace_fork: lock failed")); 4371 4372 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); 4373 if (error != 0) { 4374 sx_xunlock(&old_map->lock); 4375 sx_xunlock(&new_map->lock); 4376 vm_map_process_deferred(); 4377 vmspace_free(vm2); 4378 return (NULL); 4379 } 4380 4381 new_map->anon_loc = old_map->anon_loc; 4382 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | 4383 MAP_ASLR_STACK | MAP_WXORX); 4384 4385 VM_MAP_ENTRY_FOREACH(old_entry, old_map) { 4386 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) 4387 panic("vm_map_fork: encountered a submap"); 4388 4389 inh = old_entry->inheritance; 4390 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 4391 inh != VM_INHERIT_NONE) 4392 inh = VM_INHERIT_COPY; 4393 4394 switch (inh) { 4395 case VM_INHERIT_NONE: 4396 break; 4397 4398 case VM_INHERIT_SHARE: 4399 /* 4400 * Clone the entry, creating the shared object if 4401 * necessary. 4402 */ 4403 object = old_entry->object.vm_object; 4404 if (object == NULL) { 4405 vm_map_entry_back(old_entry); 4406 object = old_entry->object.vm_object; 4407 } 4408 4409 /* 4410 * Add the reference before calling vm_object_shadow 4411 * to insure that a shadow object is created. 4412 */ 4413 vm_object_reference(object); 4414 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4415 vm_object_shadow(&old_entry->object.vm_object, 4416 &old_entry->offset, 4417 old_entry->end - old_entry->start, 4418 old_entry->cred, 4419 /* Transfer the second reference too. */ 4420 true); 4421 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4422 old_entry->cred = NULL; 4423 4424 /* 4425 * As in vm_map_merged_neighbor_dispose(), 4426 * the vnode lock will not be acquired in 4427 * this call to vm_object_deallocate(). 4428 */ 4429 vm_object_deallocate(object); 4430 object = old_entry->object.vm_object; 4431 } else { 4432 VM_OBJECT_WLOCK(object); 4433 vm_object_clear_flag(object, OBJ_ONEMAPPING); 4434 if (old_entry->cred != NULL) { 4435 KASSERT(object->cred == NULL, 4436 ("vmspace_fork both cred")); 4437 object->cred = old_entry->cred; 4438 object->charge = old_entry->end - 4439 old_entry->start; 4440 old_entry->cred = NULL; 4441 } 4442 4443 /* 4444 * Assert the correct state of the vnode 4445 * v_writecount while the object is locked, to 4446 * not relock it later for the assertion 4447 * correctness. 4448 */ 4449 if (old_entry->eflags & MAP_ENTRY_WRITECNT && 4450 object->type == OBJT_VNODE) { 4451 KASSERT(((struct vnode *)object-> 4452 handle)->v_writecount > 0, 4453 ("vmspace_fork: v_writecount %p", 4454 object)); 4455 KASSERT(object->un_pager.vnp. 
4456 writemappings > 0, 4457 ("vmspace_fork: vnp.writecount %p", 4458 object)); 4459 } 4460 VM_OBJECT_WUNLOCK(object); 4461 } 4462 4463 /* 4464 * Clone the entry, referencing the shared object. 4465 */ 4466 new_entry = vm_map_entry_create(new_map); 4467 *new_entry = *old_entry; 4468 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4469 MAP_ENTRY_IN_TRANSITION); 4470 new_entry->wiring_thread = NULL; 4471 new_entry->wired_count = 0; 4472 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { 4473 vm_pager_update_writecount(object, 4474 new_entry->start, new_entry->end); 4475 } 4476 vm_map_entry_set_vnode_text(new_entry, true); 4477 4478 /* 4479 * Insert the entry into the new map -- we know we're 4480 * inserting at the end of the new map. 4481 */ 4482 vm_map_entry_link(new_map, new_entry); 4483 vmspace_map_entry_forked(vm1, vm2, new_entry); 4484 4485 /* 4486 * Update the physical map 4487 */ 4488 pmap_copy(new_map->pmap, old_map->pmap, 4489 new_entry->start, 4490 (old_entry->end - old_entry->start), 4491 old_entry->start); 4492 break; 4493 4494 case VM_INHERIT_COPY: 4495 /* 4496 * Clone the entry and link into the map. 4497 */ 4498 new_entry = vm_map_entry_create(new_map); 4499 *new_entry = *old_entry; 4500 /* 4501 * Copied entry is COW over the old object. 4502 */ 4503 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4504 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT); 4505 new_entry->wiring_thread = NULL; 4506 new_entry->wired_count = 0; 4507 new_entry->object.vm_object = NULL; 4508 new_entry->cred = NULL; 4509 vm_map_entry_link(new_map, new_entry); 4510 vmspace_map_entry_forked(vm1, vm2, new_entry); 4511 vm_map_copy_entry(old_map, new_map, old_entry, 4512 new_entry, fork_charge); 4513 vm_map_entry_set_vnode_text(new_entry, true); 4514 break; 4515 4516 case VM_INHERIT_ZERO: 4517 /* 4518 * Create a new anonymous mapping entry modelled from 4519 * the old one. 4520 */ 4521 new_entry = vm_map_entry_create(new_map); 4522 memset(new_entry, 0, sizeof(*new_entry)); 4523 4524 new_entry->start = old_entry->start; 4525 new_entry->end = old_entry->end; 4526 new_entry->eflags = old_entry->eflags & 4527 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 4528 MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC | 4529 MAP_ENTRY_SPLIT_BOUNDARY_MASK); 4530 new_entry->protection = old_entry->protection; 4531 new_entry->max_protection = old_entry->max_protection; 4532 new_entry->inheritance = VM_INHERIT_ZERO; 4533 4534 vm_map_entry_link(new_map, new_entry); 4535 vmspace_map_entry_forked(vm1, vm2, new_entry); 4536 4537 new_entry->cred = curthread->td_ucred; 4538 crhold(new_entry->cred); 4539 *fork_charge += (new_entry->end - new_entry->start); 4540 4541 break; 4542 } 4543 } 4544 /* 4545 * Use inlined vm_map_unlock() to postpone handling the deferred 4546 * map entries, which cannot be done until both old_map and 4547 * new_map locks are released. 4548 */ 4549 sx_xunlock(&old_map->lock); 4550 sx_xunlock(&new_map->lock); 4551 vm_map_process_deferred(); 4552 4553 return (vm2); 4554 } 4555 4556 /* 4557 * Create a process's stack for exec_new_vmspace(). This function is never 4558 * asked to wire the newly created stack. 4559 */ 4560 int 4561 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4562 vm_prot_t prot, vm_prot_t max, int cow) 4563 { 4564 vm_size_t growsize, init_ssize; 4565 rlim_t vmemlim; 4566 int rv; 4567 4568 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4569 growsize = sgrowsiz; 4570 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 4571 vm_map_lock(map); 4572 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4573 /* If we would blow our VMEM resource limit, no go */ 4574 if (map->size + init_ssize > vmemlim) { 4575 rv = KERN_NO_SPACE; 4576 goto out; 4577 } 4578 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 4579 max, cow); 4580 out: 4581 vm_map_unlock(map); 4582 return (rv); 4583 } 4584 4585 static int stack_guard_page = 1; 4586 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 4587 &stack_guard_page, 0, 4588 "Specifies the number of guard pages for a stack that grows"); 4589 4590 static int 4591 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4592 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 4593 { 4594 vm_map_entry_t gap_entry, new_entry, prev_entry; 4595 vm_offset_t bot, gap_bot, gap_top, top; 4596 vm_size_t init_ssize, sgp; 4597 int rv; 4598 4599 KASSERT((cow & MAP_STACK_AREA) != 0, 4600 ("New mapping is not a stack")); 4601 4602 if (max_ssize == 0 || 4603 !vm_map_range_valid(map, addrbos, addrbos + max_ssize)) 4604 return (KERN_INVALID_ADDRESS); 4605 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4606 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4607 (vm_size_t)stack_guard_page * PAGE_SIZE; 4608 if (sgp >= max_ssize) 4609 return (KERN_INVALID_ARGUMENT); 4610 4611 init_ssize = growsize; 4612 if (max_ssize < init_ssize + sgp) 4613 init_ssize = max_ssize - sgp; 4614 4615 /* If addr is already mapped, no go */ 4616 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 4617 return (KERN_NO_SPACE); 4618 4619 /* 4620 * If we can't accommodate max_ssize in the current mapping, no go. 4621 */ 4622 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) 4623 return (KERN_NO_SPACE); 4624 4625 /* 4626 * We initially map a stack of only init_ssize, at the top of 4627 * the range. We will grow as needed later. 4628 * 4629 * Note: we would normally expect prot and max to be VM_PROT_ALL, 4630 * and cow to be 0. Possibly we should eliminate these as input 4631 * parameters, and just pass these values here in the insert call. 4632 */ 4633 bot = addrbos + max_ssize - init_ssize; 4634 top = bot + init_ssize; 4635 gap_bot = addrbos; 4636 gap_top = bot; 4637 rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow, 4638 &new_entry); 4639 if (rv != KERN_SUCCESS) 4640 return (rv); 4641 KASSERT(new_entry->end == top || new_entry->start == bot, 4642 ("Bad entry start/end for new stack entry")); 4643 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 4644 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 4645 if (gap_bot == gap_top) 4646 return (KERN_SUCCESS); 4647 rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 4648 VM_PROT_NONE, MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP, 4649 &gap_entry); 4650 if (rv == KERN_SUCCESS) { 4651 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, 4652 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); 4653 KASSERT((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0, 4654 ("entry %p not stack gap %#x", gap_entry, 4655 gap_entry->eflags)); 4656 4657 /* 4658 * Gap can never successfully handle a fault, so 4659 * read-ahead logic is never used for it. Re-use 4660 * next_read of the gap entry to store 4661 * stack_guard_page for vm_map_growstack(). 4662 * Similarly, since a gap cannot have a backing object, 4663 * store the original stack protections in the 4664 * object offset. 
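 *
 *	Summary of the encoding (the matching decode is performed by
 *	vm_map_growstack() when the gap is later consumed):
 *
 *		store:	gap_entry->next_read = sgp;
 *			gap_entry->offset = prot | PROT_MAX(max);
 *		load:	sgp = gap_entry->next_read;
 *			prot = PROT_EXTRACT(gap_entry->offset);
 *			max = PROT_MAX_EXTRACT(gap_entry->offset);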
4665 */ 4666 gap_entry->next_read = sgp; 4667 gap_entry->offset = prot | PROT_MAX(max); 4668 } else { 4669 (void)vm_map_delete(map, bot, top); 4670 } 4671 return (rv); 4672 } 4673 4674 /* 4675 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 4676 * successfully grow the stack. 4677 */ 4678 static int 4679 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 4680 { 4681 vm_map_entry_t stack_entry; 4682 struct proc *p; 4683 struct vmspace *vm; 4684 vm_offset_t gap_end, gap_start, grow_start; 4685 vm_size_t grow_amount, guard, max_grow, sgp; 4686 vm_prot_t prot, max; 4687 rlim_t lmemlim, stacklim, vmemlim; 4688 int rv, rv1 __diagused; 4689 bool gap_deleted, is_procstack; 4690 #ifdef notyet 4691 uint64_t limit; 4692 #endif 4693 #ifdef RACCT 4694 int error __diagused; 4695 #endif 4696 4697 p = curproc; 4698 vm = p->p_vmspace; 4699 4700 /* 4701 * Disallow stack growth when the access is performed by a 4702 * debugger or AIO daemon. The reason is that the wrong 4703 * resource limits are applied. 4704 */ 4705 if (p != initproc && (map != &p->p_vmspace->vm_map || 4706 p->p_textvp == NULL)) 4707 return (KERN_FAILURE); 4708 4709 MPASS(!map->system_map); 4710 4711 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 4712 stacklim = lim_cur(curthread, RLIMIT_STACK); 4713 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4714 retry: 4715 /* If addr is not in a hole for a stack grow area, no need to grow. */ 4716 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 4717 return (KERN_FAILURE); 4718 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 4719 return (KERN_SUCCESS); 4720 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0) { 4721 stack_entry = vm_map_entry_succ(gap_entry); 4722 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 4723 stack_entry->start != gap_entry->end) 4724 return (KERN_FAILURE); 4725 grow_amount = round_page(stack_entry->start - addr); 4726 } else { 4727 return (KERN_FAILURE); 4728 } 4729 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4730 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4731 gap_entry->next_read; 4732 max_grow = gap_entry->end - gap_entry->start; 4733 if (guard > max_grow) 4734 return (KERN_NO_SPACE); 4735 max_grow -= guard; 4736 if (grow_amount > max_grow) 4737 return (KERN_NO_SPACE); 4738 4739 /* 4740 * If this is the main process stack, see if we're over the stack 4741 * limit. 
4742 */ 4743 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 4744 addr < (vm_offset_t)vm->vm_stacktop; 4745 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 4746 return (KERN_NO_SPACE); 4747 4748 #ifdef RACCT 4749 if (racct_enable) { 4750 PROC_LOCK(p); 4751 if (is_procstack && racct_set(p, RACCT_STACK, 4752 ctob(vm->vm_ssize) + grow_amount)) { 4753 PROC_UNLOCK(p); 4754 return (KERN_NO_SPACE); 4755 } 4756 PROC_UNLOCK(p); 4757 } 4758 #endif 4759 4760 grow_amount = roundup(grow_amount, sgrowsiz); 4761 if (grow_amount > max_grow) 4762 grow_amount = max_grow; 4763 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 4764 grow_amount = trunc_page((vm_size_t)stacklim) - 4765 ctob(vm->vm_ssize); 4766 } 4767 4768 #ifdef notyet 4769 PROC_LOCK(p); 4770 limit = racct_get_available(p, RACCT_STACK); 4771 PROC_UNLOCK(p); 4772 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 4773 grow_amount = limit - ctob(vm->vm_ssize); 4774 #endif 4775 4776 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 4777 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 4778 rv = KERN_NO_SPACE; 4779 goto out; 4780 } 4781 #ifdef RACCT 4782 if (racct_enable) { 4783 PROC_LOCK(p); 4784 if (racct_set(p, RACCT_MEMLOCK, 4785 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 4786 PROC_UNLOCK(p); 4787 rv = KERN_NO_SPACE; 4788 goto out; 4789 } 4790 PROC_UNLOCK(p); 4791 } 4792 #endif 4793 } 4794 4795 /* If we would blow our VMEM resource limit, no go */ 4796 if (map->size + grow_amount > vmemlim) { 4797 rv = KERN_NO_SPACE; 4798 goto out; 4799 } 4800 #ifdef RACCT 4801 if (racct_enable) { 4802 PROC_LOCK(p); 4803 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 4804 PROC_UNLOCK(p); 4805 rv = KERN_NO_SPACE; 4806 goto out; 4807 } 4808 PROC_UNLOCK(p); 4809 } 4810 #endif 4811 4812 if (vm_map_lock_upgrade(map)) { 4813 gap_entry = NULL; 4814 vm_map_lock_read(map); 4815 goto retry; 4816 } 4817 4818 /* 4819 * The gap_entry "offset" field is overloaded. See 4820 * vm_map_stack_locked(). 4821 */ 4822 prot = PROT_EXTRACT(gap_entry->offset); 4823 max = PROT_MAX_EXTRACT(gap_entry->offset); 4824 sgp = gap_entry->next_read; 4825 4826 grow_start = gap_entry->end - grow_amount; 4827 if (gap_entry->start + grow_amount == gap_entry->end) { 4828 gap_start = gap_entry->start; 4829 gap_end = gap_entry->end; 4830 vm_map_entry_delete(map, gap_entry); 4831 gap_deleted = true; 4832 } else { 4833 MPASS(gap_entry->start < gap_entry->end - grow_amount); 4834 vm_map_entry_resize(map, gap_entry, -grow_amount); 4835 gap_deleted = false; 4836 } 4837 rv = vm_map_insert(map, NULL, 0, grow_start, 4838 grow_start + grow_amount, prot, max, MAP_STACK_AREA); 4839 if (rv != KERN_SUCCESS) { 4840 if (gap_deleted) { 4841 rv1 = vm_map_insert1(map, NULL, 0, gap_start, 4842 gap_end, VM_PROT_NONE, VM_PROT_NONE, 4843 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP, 4844 &gap_entry); 4845 MPASS(rv1 == KERN_SUCCESS); 4846 gap_entry->next_read = sgp; 4847 gap_entry->offset = prot | PROT_MAX(max); 4848 } else { 4849 vm_map_entry_resize(map, gap_entry, 4850 grow_amount); 4851 } 4852 } 4853 if (rv == KERN_SUCCESS && is_procstack) 4854 vm->vm_ssize += btoc(grow_amount); 4855 4856 /* 4857 * Heed the MAP_WIREFUTURE flag if it was set for this process. 
4858 */ 4859 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { 4860 rv = vm_map_wire_locked(map, grow_start, 4861 grow_start + grow_amount, 4862 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 4863 } 4864 vm_map_lock_downgrade(map); 4865 4866 out: 4867 #ifdef RACCT 4868 if (racct_enable && rv != KERN_SUCCESS) { 4869 PROC_LOCK(p); 4870 error = racct_set(p, RACCT_VMEM, map->size); 4871 KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); 4872 if (!old_mlock) { 4873 error = racct_set(p, RACCT_MEMLOCK, 4874 ptoa(pmap_wired_count(map->pmap))); 4875 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed")); 4876 } 4877 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); 4878 KASSERT(error == 0, ("decreasing RACCT_STACK failed")); 4879 PROC_UNLOCK(p); 4880 } 4881 #endif 4882 4883 return (rv); 4884 } 4885 4886 /* 4887 * Unshare the specified VM space for exec. If other processes are 4888 * mapped to it, then create a new one. The new vmspace is null. 4889 */ 4890 int 4891 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 4892 { 4893 struct vmspace *oldvmspace = p->p_vmspace; 4894 struct vmspace *newvmspace; 4895 4896 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, 4897 ("vmspace_exec recursed")); 4898 newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit); 4899 if (newvmspace == NULL) 4900 return (ENOMEM); 4901 newvmspace->vm_swrss = oldvmspace->vm_swrss; 4902 /* 4903 * This code is written like this for prototype purposes. The 4904 * goal is to avoid running down the vmspace here, but let the 4905 * other process's that are still using the vmspace to finally 4906 * run it down. Even though there is little or no chance of blocking 4907 * here, it is a good idea to keep this form for future mods. 4908 */ 4909 PROC_VMSPACE_LOCK(p); 4910 p->p_vmspace = newvmspace; 4911 PROC_VMSPACE_UNLOCK(p); 4912 if (p == curthread->td_proc) 4913 pmap_activate(curthread); 4914 curthread->td_pflags |= TDP_EXECVMSPC; 4915 return (0); 4916 } 4917 4918 /* 4919 * Unshare the specified VM space for forcing COW. This 4920 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4921 */ 4922 int 4923 vmspace_unshare(struct proc *p) 4924 { 4925 struct vmspace *oldvmspace = p->p_vmspace; 4926 struct vmspace *newvmspace; 4927 vm_ooffset_t fork_charge; 4928 4929 /* 4930 * The caller is responsible for ensuring that the reference count 4931 * cannot concurrently transition 1 -> 2. 4932 */ 4933 if (refcount_load(&oldvmspace->vm_refcnt) == 1) 4934 return (0); 4935 fork_charge = 0; 4936 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4937 if (newvmspace == NULL) 4938 return (ENOMEM); 4939 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4940 vmspace_free(newvmspace); 4941 return (ENOMEM); 4942 } 4943 PROC_VMSPACE_LOCK(p); 4944 p->p_vmspace = newvmspace; 4945 PROC_VMSPACE_UNLOCK(p); 4946 if (p == curthread->td_proc) 4947 pmap_activate(curthread); 4948 vmspace_free(oldvmspace); 4949 return (0); 4950 } 4951 4952 /* 4953 * vm_map_lookup: 4954 * 4955 * Finds the VM object, offset, and 4956 * protection for a given virtual address in the 4957 * specified map, assuming a page fault of the 4958 * type specified. 4959 * 4960 * Leaves the map in question locked for read; return 4961 * values are guaranteed until a vm_map_lookup_done 4962 * call is performed. Note that the map argument 4963 * is in/out; the returned map must be used in 4964 * the call to vm_map_lookup_done. 4965 * 4966 * A handle (out_entry) is returned for use in 4967 * vm_map_lookup_done, to make that fast. 
4968 * 4969 * If a lookup is requested with "write protection" 4970 * specified, the map may be changed to perform virtual 4971 * copying operations, although the data referenced will 4972 * remain the same. 4973 */ 4974 int 4975 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4976 vm_offset_t vaddr, 4977 vm_prot_t fault_typea, 4978 vm_map_entry_t *out_entry, /* OUT */ 4979 vm_object_t *object, /* OUT */ 4980 vm_pindex_t *pindex, /* OUT */ 4981 vm_prot_t *out_prot, /* OUT */ 4982 boolean_t *wired) /* OUT */ 4983 { 4984 vm_map_entry_t entry; 4985 vm_map_t map = *var_map; 4986 vm_prot_t prot; 4987 vm_prot_t fault_type; 4988 vm_object_t eobject; 4989 vm_size_t size; 4990 struct ucred *cred; 4991 4992 RetryLookup: 4993 4994 vm_map_lock_read(map); 4995 4996 RetryLookupLocked: 4997 /* 4998 * Lookup the faulting address. 4999 */ 5000 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 5001 vm_map_unlock_read(map); 5002 return (KERN_INVALID_ADDRESS); 5003 } 5004 5005 entry = *out_entry; 5006 5007 /* 5008 * Handle submaps. 5009 */ 5010 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5011 vm_map_t old_map = map; 5012 5013 *var_map = map = entry->object.sub_map; 5014 vm_map_unlock_read(old_map); 5015 goto RetryLookup; 5016 } 5017 5018 /* 5019 * Check whether this task is allowed to have this page. 5020 */ 5021 prot = entry->protection; 5022 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 5023 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 5024 if (prot == VM_PROT_NONE && map != kernel_map && 5025 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 5026 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 && 5027 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 5028 goto RetryLookupLocked; 5029 } 5030 fault_type = fault_typea & VM_PROT_ALL; 5031 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 5032 vm_map_unlock_read(map); 5033 return (KERN_PROTECTION_FAILURE); 5034 } 5035 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 5036 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 5037 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 5038 ("entry %p flags %x", entry, entry->eflags)); 5039 if ((fault_typea & VM_PROT_COPY) != 0 && 5040 (entry->max_protection & VM_PROT_WRITE) == 0 && 5041 (entry->eflags & MAP_ENTRY_COW) == 0) { 5042 vm_map_unlock_read(map); 5043 return (KERN_PROTECTION_FAILURE); 5044 } 5045 5046 /* 5047 * If this page is not pageable, we have to get it for all possible 5048 * accesses. 5049 */ 5050 *wired = (entry->wired_count != 0); 5051 if (*wired) 5052 fault_type = entry->protection; 5053 size = entry->end - entry->start; 5054 5055 /* 5056 * If the entry was copy-on-write, we either ... 5057 */ 5058 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5059 /* 5060 * If we want to write the page, we may as well handle that 5061 * now since we've got the map locked. 5062 * 5063 * If we don't need to write the page, we just demote the 5064 * permissions allowed. 5065 */ 5066 if ((fault_type & VM_PROT_WRITE) != 0 || 5067 (fault_typea & VM_PROT_COPY) != 0) { 5068 /* 5069 * Make a new object, and place it in the object 5070 * chain. Note that no new references have appeared 5071 * -- one just moved from the map to the new 5072 * object. 5073 */ 5074 if (vm_map_lock_upgrade(map)) 5075 goto RetryLookup; 5076 5077 if (entry->cred == NULL) { 5078 /* 5079 * The debugger owner is charged for 5080 * the memory. 
5081 */ 5082 cred = curthread->td_ucred; 5083 crhold(cred); 5084 if (!swap_reserve_by_cred(size, cred)) { 5085 crfree(cred); 5086 vm_map_unlock(map); 5087 return (KERN_RESOURCE_SHORTAGE); 5088 } 5089 entry->cred = cred; 5090 } 5091 eobject = entry->object.vm_object; 5092 vm_object_shadow(&entry->object.vm_object, 5093 &entry->offset, size, entry->cred, false); 5094 if (eobject == entry->object.vm_object) { 5095 /* 5096 * The object was not shadowed. 5097 */ 5098 swap_release_by_cred(size, entry->cred); 5099 crfree(entry->cred); 5100 } 5101 entry->cred = NULL; 5102 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 5103 5104 vm_map_lock_downgrade(map); 5105 } else { 5106 /* 5107 * We're attempting to read a copy-on-write page -- 5108 * don't allow writes. 5109 */ 5110 prot &= ~VM_PROT_WRITE; 5111 } 5112 } 5113 5114 /* 5115 * Create an object if necessary. 5116 */ 5117 if (entry->object.vm_object == NULL && !map->system_map) { 5118 if (vm_map_lock_upgrade(map)) 5119 goto RetryLookup; 5120 entry->object.vm_object = vm_object_allocate_anon(atop(size), 5121 NULL, entry->cred, size); 5122 entry->offset = 0; 5123 entry->cred = NULL; 5124 vm_map_lock_downgrade(map); 5125 } 5126 5127 /* 5128 * Return the object/offset from this entry. If the entry was 5129 * copy-on-write or empty, it has been fixed up. 5130 */ 5131 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5132 *object = entry->object.vm_object; 5133 5134 *out_prot = prot; 5135 return (KERN_SUCCESS); 5136 } 5137 5138 /* 5139 * vm_map_lookup_locked: 5140 * 5141 * Lookup the faulting address. A version of vm_map_lookup that returns 5142 * KERN_FAILURE instead of blocking on map lock or memory allocation. 5143 */ 5144 int 5145 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 5146 vm_offset_t vaddr, 5147 vm_prot_t fault_typea, 5148 vm_map_entry_t *out_entry, /* OUT */ 5149 vm_object_t *object, /* OUT */ 5150 vm_pindex_t *pindex, /* OUT */ 5151 vm_prot_t *out_prot, /* OUT */ 5152 boolean_t *wired) /* OUT */ 5153 { 5154 vm_map_entry_t entry; 5155 vm_map_t map = *var_map; 5156 vm_prot_t prot; 5157 vm_prot_t fault_type = fault_typea; 5158 5159 /* 5160 * Lookup the faulting address. 5161 */ 5162 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 5163 return (KERN_INVALID_ADDRESS); 5164 5165 entry = *out_entry; 5166 5167 /* 5168 * Fail if the entry refers to a submap. 5169 */ 5170 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 5171 return (KERN_FAILURE); 5172 5173 /* 5174 * Check whether this task is allowed to have this page. 5175 */ 5176 prot = entry->protection; 5177 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 5178 if ((fault_type & prot) != fault_type) 5179 return (KERN_PROTECTION_FAILURE); 5180 5181 /* 5182 * If this page is not pageable, we have to get it for all possible 5183 * accesses. 5184 */ 5185 *wired = (entry->wired_count != 0); 5186 if (*wired) 5187 fault_type = entry->protection; 5188 5189 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 5190 /* 5191 * Fail if the entry was copy-on-write for a write fault. 5192 */ 5193 if (fault_type & VM_PROT_WRITE) 5194 return (KERN_FAILURE); 5195 /* 5196 * We're attempting to read a copy-on-write page -- 5197 * don't allow writes. 5198 */ 5199 prot &= ~VM_PROT_WRITE; 5200 } 5201 5202 /* 5203 * Fail if an object should be created. 5204 */ 5205 if (entry->object.vm_object == NULL && !map->system_map) 5206 return (KERN_FAILURE); 5207 5208 /* 5209 * Return the object/offset from this entry. If the entry was 5210 * copy-on-write or empty, it has been fixed up. 
5211 */ 5212 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 5213 *object = entry->object.vm_object; 5214 5215 *out_prot = prot; 5216 return (KERN_SUCCESS); 5217 } 5218 5219 /* 5220 * vm_map_lookup_done: 5221 * 5222 * Releases locks acquired by a vm_map_lookup 5223 * (according to the handle returned by that lookup). 5224 */ 5225 void 5226 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 5227 { 5228 /* 5229 * Unlock the main-level map 5230 */ 5231 vm_map_unlock_read(map); 5232 } 5233 5234 vm_offset_t 5235 vm_map_max_KBI(const struct vm_map *map) 5236 { 5237 5238 return (vm_map_max(map)); 5239 } 5240 5241 vm_offset_t 5242 vm_map_min_KBI(const struct vm_map *map) 5243 { 5244 5245 return (vm_map_min(map)); 5246 } 5247 5248 pmap_t 5249 vm_map_pmap_KBI(vm_map_t map) 5250 { 5251 5252 return (map->pmap); 5253 } 5254 5255 bool 5256 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end) 5257 { 5258 5259 return (vm_map_range_valid(map, start, end)); 5260 } 5261 5262 #ifdef INVARIANTS 5263 static void 5264 _vm_map_assert_consistent(vm_map_t map, int check) 5265 { 5266 vm_map_entry_t entry, prev; 5267 vm_map_entry_t cur, header, lbound, ubound; 5268 vm_size_t max_left, max_right; 5269 5270 #ifdef DIAGNOSTIC 5271 ++map->nupdates; 5272 #endif 5273 if (enable_vmmap_check != check) 5274 return; 5275 5276 header = prev = &map->header; 5277 VM_MAP_ENTRY_FOREACH(entry, map) { 5278 KASSERT(prev->end <= entry->start, 5279 ("map %p prev->end = %jx, start = %jx", map, 5280 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5281 KASSERT(entry->start < entry->end, 5282 ("map %p start = %jx, end = %jx", map, 5283 (uintmax_t)entry->start, (uintmax_t)entry->end)); 5284 KASSERT(entry->left == header || 5285 entry->left->start < entry->start, 5286 ("map %p left->start = %jx, start = %jx", map, 5287 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); 5288 KASSERT(entry->right == header || 5289 entry->start < entry->right->start, 5290 ("map %p start = %jx, right->start = %jx", map, 5291 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); 5292 cur = map->root; 5293 lbound = ubound = header; 5294 for (;;) { 5295 if (entry->start < cur->start) { 5296 ubound = cur; 5297 cur = cur->left; 5298 KASSERT(cur != lbound, 5299 ("map %p cannot find %jx", 5300 map, (uintmax_t)entry->start)); 5301 } else if (cur->end <= entry->start) { 5302 lbound = cur; 5303 cur = cur->right; 5304 KASSERT(cur != ubound, 5305 ("map %p cannot find %jx", 5306 map, (uintmax_t)entry->start)); 5307 } else { 5308 KASSERT(cur == entry, 5309 ("map %p cannot find %jx", 5310 map, (uintmax_t)entry->start)); 5311 break; 5312 } 5313 } 5314 max_left = vm_map_entry_max_free_left(entry, lbound); 5315 max_right = vm_map_entry_max_free_right(entry, ubound); 5316 KASSERT(entry->max_free == vm_size_max(max_left, max_right), 5317 ("map %p max = %jx, max_left = %jx, max_right = %jx", map, 5318 (uintmax_t)entry->max_free, 5319 (uintmax_t)max_left, (uintmax_t)max_right)); 5320 prev = entry; 5321 } 5322 KASSERT(prev->end <= entry->start, 5323 ("map %p prev->end = %jx, start = %jx", map, 5324 (uintmax_t)prev->end, (uintmax_t)entry->start)); 5325 } 5326 #endif 5327 5328 #include "opt_ddb.h" 5329 #ifdef DDB 5330 #include <sys/kernel.h> 5331 5332 #include <ddb/ddb.h> 5333 5334 static void 5335 vm_map_print(vm_map_t map) 5336 { 5337 vm_map_entry_t entry, prev; 5338 5339 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 5340 (void *)map, 5341 (void *)map->pmap, map->nentries, map->timestamp); 5342 5343 db_indent += 2; 5344 
prev = &map->header; 5345 VM_MAP_ENTRY_FOREACH(entry, map) { 5346 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 5347 (void *)entry, (void *)entry->start, (void *)entry->end, 5348 entry->eflags); 5349 { 5350 static const char * const inheritance_name[4] = 5351 {"share", "copy", "none", "donate_copy"}; 5352 5353 db_iprintf(" prot=%x/%x/%s", 5354 entry->protection, 5355 entry->max_protection, 5356 inheritance_name[(int)(unsigned char) 5357 entry->inheritance]); 5358 if (entry->wired_count != 0) 5359 db_printf(", wired"); 5360 } 5361 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 5362 db_printf(", share=%p, offset=0x%jx\n", 5363 (void *)entry->object.sub_map, 5364 (uintmax_t)entry->offset); 5365 if (prev == &map->header || 5366 prev->object.sub_map != 5367 entry->object.sub_map) { 5368 db_indent += 2; 5369 vm_map_print((vm_map_t)entry->object.sub_map); 5370 db_indent -= 2; 5371 } 5372 } else { 5373 if (entry->cred != NULL) 5374 db_printf(", ruid %d", entry->cred->cr_ruid); 5375 db_printf(", object=%p, offset=0x%jx", 5376 (void *)entry->object.vm_object, 5377 (uintmax_t)entry->offset); 5378 if (entry->object.vm_object && entry->object.vm_object->cred) 5379 db_printf(", obj ruid %d charge %jx", 5380 entry->object.vm_object->cred->cr_ruid, 5381 (uintmax_t)entry->object.vm_object->charge); 5382 if (entry->eflags & MAP_ENTRY_COW) 5383 db_printf(", copy (%s)", 5384 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 5385 db_printf("\n"); 5386 5387 if (prev == &map->header || 5388 prev->object.vm_object != 5389 entry->object.vm_object) { 5390 db_indent += 2; 5391 vm_object_print((db_expr_t)(intptr_t) 5392 entry->object.vm_object, 5393 0, 0, (char *)0); 5394 db_indent -= 2; 5395 } 5396 } 5397 prev = entry; 5398 } 5399 db_indent -= 2; 5400 } 5401 5402 DB_SHOW_COMMAND(map, map) 5403 { 5404 5405 if (!have_addr) { 5406 db_printf("usage: show map <addr>\n"); 5407 return; 5408 } 5409 vm_map_print((vm_map_t)addr); 5410 } 5411 5412 DB_SHOW_COMMAND(procvm, procvm) 5413 { 5414 struct proc *p; 5415 5416 if (have_addr) { 5417 p = db_lookup_proc(addr); 5418 } else { 5419 p = curproc; 5420 } 5421 5422 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 5423 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 5424 (void *)vmspace_pmap(p->p_vmspace)); 5425 5426 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 5427 } 5428 5429 #endif /* DDB */ 5430
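
/*
 * Illustrative DDB session using the commands defined above (addresses
 * and counts are hypothetical):
 *
 *	db> show procvm
 *	p = 0x..., vmspace = 0x..., map = 0x..., pmap = 0x...
 *	db> show map <map address>
 *	Task map 0x...: pmap=0x..., nentries=..., version=...
 *	map entry 0x...: start=0x..., end=0x..., eflags=0x..., ...
 */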