/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
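
/*
 * For example (illustrative numbers only, not from the original
 * source): clipping the entry [0x1000, 0x4000) at address 0x2000
 * yields two adjacent entries, [0x1000, 0x2000) and [0x2000, 0x4000),
 * with the second entry's object offset advanced by 0x1000 so that
 * both entries continue to reference the same backing pages.  See
 * _vm_map_clip_start() below for the actual split.
 */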

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}
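
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * source): a caller that wants to inspect another process's map bounds
 * the vmspace's lifetime by pairing the acquired reference with
 * vmspace_free():
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	... operate on &vm->vm_map under the appropriate map lock ...
 *	vmspace_free(vm);
 */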

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The AIO kernel processes switch to and from a user process's
 * vmspace while performing an I/O operation on behalf of a user
 * process.  The new vmspace is either the vmspace of a user process
 * obtained from an active AIO request or the initial vmspace of the
 * AIO kernel process (when it is idling).  Because user processes
 * will block to drain any active AIO requests before proceeding in
 * exit() or execve(), the vmspace reference count for these vmspaces
 * can never be 0.  This allows for a much simpler implementation than
 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
 * processes hold an extra reference on their initial vmspace for the
 * life of the process so that this guarantee is true for any vmspace
 * passed as 'newvm'.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	/* Remove the daemon's reference to the old address space. */
	KASSERT(oldvm->vm_refcnt > 1,
	    ("vmspace_switch_aio: oldvm dropping last reference"));
	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object, object1;
	struct vnode *vp;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	VM_OBJECT_RLOCK(object);
	while ((object1 = object->backing_object) != NULL) {
		VM_OBJECT_RLOCK(object1);
		VM_OBJECT_RUNLOCK(object);
		object = object1;
	}

	/*
	 * For OBJT_DEAD objects, v_writecount was handled in
	 * vnode_pager_dealloc().
	 */
	if (object->type != OBJT_DEAD) {
		KASSERT(((object->flags & OBJ_TMPFS) == 0 &&
		    object->type == OBJT_VNODE) ||
		    ((object->flags & OBJ_TMPFS) != 0 &&
		    object->type == OBJT_SWAP),
		    ("vm_map_entry_set_vnode_text: wrong object type, "
		    "entry %p, object %p, add %d", entry, object, add));
		vp = (object->flags & OBJ_TMPFS) == 0 ? object->handle :
		    object->un_pager.swp.swp_tmpfs;
		if (add)
			VOP_SET_TEXT_CHECKED(vp);
		else
			VOP_UNSET_TEXT_CHECKED(vp);
	}
	VM_OBJECT_RUNLOCK(object);
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		MPASS((entry->eflags & (MAP_ENTRY_VN_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_VN_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_set_vnode_text(entry, false);
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}
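
/*
 * Callers of vm_map_lock_upgrade() must be prepared for failure, since
 * the map is left unlocked in that case.  A sketch of the usual retry
 * pattern (illustrative only):
 *
 *	if (vm_map_lock_upgrade(map)) {
 *		vm_map_lock(map);
 *		... the map may have changed; repeat the lookup ...
 *	}
 */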

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)

#ifdef DIAGNOSTIC
static int enable_vmmap_check = 1;
#else
static int enable_vmmap_check = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");

static void
_vm_map_assert_consistent(vm_map_t map)
{
	vm_map_entry_t entry;
	vm_map_entry_t child;
	vm_size_t max_left, max_right;

	if (!enable_vmmap_check)
		return;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		KASSERT(entry->prev->end <= entry->start,
		    ("map %p prev->end = %jx, start = %jx", map,
		    (uintmax_t)entry->prev->end, (uintmax_t)entry->start));
		KASSERT(entry->start < entry->end,
		    ("map %p start = %jx, end = %jx", map,
		    (uintmax_t)entry->start, (uintmax_t)entry->end));
		KASSERT(entry->end <= entry->next->start,
		    ("map %p end = %jx, next->start = %jx", map,
		    (uintmax_t)entry->end, (uintmax_t)entry->next->start));
		KASSERT(entry->left == NULL ||
		    entry->left->start < entry->start,
		    ("map %p left->start = %jx, start = %jx", map,
		    (uintmax_t)entry->left->start, (uintmax_t)entry->start));
		KASSERT(entry->right == NULL ||
		    entry->start < entry->right->start,
		    ("map %p start = %jx, right->start = %jx", map,
		    (uintmax_t)entry->start, (uintmax_t)entry->right->start));
		child = entry->left;
		max_left = (child != NULL) ? child->max_free :
		    entry->start - entry->prev->end;
		child = entry->right;
		max_right = (child != NULL) ? child->max_free :
		    entry->next->start - entry->end;
		KASSERT(entry->max_free == MAX(max_left, max_right),
		    ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
		    (uintmax_t)entry->max_free,
		    (uintmax_t)max_left, (uintmax_t)max_right));
	}
}

#define VM_MAP_ASSERT_CONSISTENT(map) \
    _vm_map_assert_consistent(map)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define VM_MAP_ASSERT_CONSISTENT(map)
#endif /* INVARIANTS */

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}
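
/*
 * The busy count lets a thread pin the map across a window where it
 * must drop the map lock.  A sketch of the intended protocol
 * (illustrative only): the count is bumped under the lock, the lock is
 * dropped for the duration of the work, and threads that need the map
 * quiescent block in vm_map_wait_busy() until the count reaches zero.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *	... work that must tolerate concurrent map readers ...
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */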

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->header.eflags = MAP_ENTRY_HEADER;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
	map->anon_loc = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{
	vm_map_entry_t child;
	vm_size_t max_left, max_right;

	child = entry->left;
	max_left = (child != NULL) ? child->max_free :
	    entry->start - entry->prev->end;
	child = entry->right;
	max_right = (child != NULL) ? child->max_free :
	    entry->next->start - entry->end;
	entry->max_free = MAX(max_left, max_right);
}

#define SPLAY_LEFT_STEP(root, y, rlist, test) do {	\
	y = root->left;					\
	if (y != NULL && (test)) {			\
		/* Rotate right and make y root. */	\
		root->left = y->right;			\
		y->right = root;			\
		vm_map_entry_set_max_free(root);	\
		root = y;				\
		y = root->left;				\
	}						\
	/* Put root on rlist. */			\
	root->left = rlist;				\
	rlist = root;					\
	root = y;					\
} while (0)

#define SPLAY_RIGHT_STEP(root, y, llist, test) do {	\
	y = root->right;				\
	if (y != NULL && (test)) {			\
		/* Rotate left and make y root. */	\
		root->right = y->left;			\
		y->left = root;				\
		vm_map_entry_set_max_free(root);	\
		root = y;				\
		y = root->right;			\
	}						\
	/* Put root on llist. */			\
	root->right = llist;				\
	llist = root;					\
	root = y;					\
} while (0)
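
/*
 * The rotation performed by SPLAY_LEFT_STEP, drawn out (an
 * illustrative aid, not from the original source): when the test
 * succeeds, the left child y is rotated into root's place before root
 * is pushed onto rlist.
 *
 *	      root                y
 *	     /    \              / \
 *	    y      C     =>     A   root
 *	   / \                     /    \
 *	  A   B                   B      C
 *
 * SPLAY_RIGHT_STEP is the mirror image, rotating the right child up.
 */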

/*
 * Walk down the tree until we find addr or a NULL pointer where addr would go,
 * breaking off left and right subtrees of nodes less than, or greater than
 * addr.  Treat pointers to nodes with max_free < length as NULL pointers.
 * llist and rlist are the two sides in reverse order (bottom-up), with llist
 * linked by the right pointer and rlist linked by the left pointer in the
 * vm_map_entry.
 */
static vm_map_entry_t
vm_map_splay_split(vm_offset_t addr, vm_size_t length,
    vm_map_entry_t root, vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t y;

	llist = NULL;
	rlist = NULL;
	while (root != NULL && root->max_free >= length) {
		if (addr < root->start) {
			SPLAY_LEFT_STEP(root, y, rlist,
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
			SPLAY_RIGHT_STEP(root, y, llist,
			    y->max_free >= length && addr >= y->end);
		} else
			break;
	}
	*out_llist = llist;
	*out_rlist = rlist;
	return (root);
}

static void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t rlist, y;

	root = root->right;
	rlist = *iolist;
	while (root != NULL)
		SPLAY_LEFT_STEP(root, y, rlist, true);
	*iolist = rlist;
}

static void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t llist, y;

	root = root->left;
	llist = *iolist;
	while (root != NULL)
		SPLAY_RIGHT_STEP(root, y, llist, true);
	*iolist = llist;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static vm_map_entry_t
vm_map_splay_merge(vm_map_entry_t root,
    vm_map_entry_t llist, vm_map_entry_t rlist,
    vm_map_entry_t ltree, vm_map_entry_t rtree)
{
	vm_map_entry_t y;

	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower if possible) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns:	the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;

	root = vm_map_splay_split(addr, 0, root, &llist, &rlist);
	if (root != NULL) {
		/* do nothing */
	} else if (llist != NULL) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		root->right = NULL;
	} else if (rlist != NULL) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		/* There is no root. */
		return (NULL);
	}
	return (vm_map_splay_merge(root, llist, rlist,
	    root->left, root->right));
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
    vm_map_entry_t entry)
{
	vm_map_entry_t llist, rlist, root;

	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	root = map->root;
	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
	KASSERT(root == NULL,
	    ("vm_map_entry_link: link object already mapped"));
	entry->prev = (llist == NULL) ? &map->header : llist;
	entry->next = (rlist == NULL) ? &map->header : rlist;
	entry->prev->next = entry->next->prev = entry;
	root = vm_map_splay_merge(entry, llist, rlist, NULL, NULL);
	map->root = entry;
	VM_MAP_ASSERT_CONSISTENT(map);
}

enum unlink_merge_type {
	UNLINK_MERGE_PREV,
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};

static void
vm_map_entry_unlink(vm_map_t map,
    vm_map_entry_t entry,
    enum unlink_merge_type op)
{
	vm_map_entry_t llist, rlist, root, y;

	VM_MAP_ASSERT_LOCKED(map);
	llist = entry->prev;
	rlist = entry->next;
	llist->next = rlist;
	rlist->prev = llist;
	root = map->root;
	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	switch (op) {
	case UNLINK_MERGE_PREV:
		vm_map_splay_findprev(root, &llist);
		llist->end = root->end;
		y = root->right;
		root = llist;
		llist = root->right;
		root->right = y;
		break;
	case UNLINK_MERGE_NEXT:
		vm_map_splay_findnext(root, &rlist);
		rlist->start = root->start;
		rlist->offset = root->offset;
		y = root->left;
		root = rlist;
		rlist = root->left;
		root->left = y;
		break;
	case UNLINK_MERGE_NONE:
		vm_map_splay_findprev(root, &llist);
		vm_map_splay_findnext(root, &rlist);
		if (llist != NULL) {
			root = llist;
			llist = root->right;
			root->right = NULL;
		} else if (rlist != NULL) {
			root = rlist;
			rlist = root->left;
			root->left = NULL;
		} else
			root = NULL;
		break;
	}
	if (root != NULL)
		root = vm_map_splay_merge(root, llist, rlist,
		    root->left, root->right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a modified vm_map_entry
 *	and propagate those values up the tree.  Call this function after
 *	resizing a map entry in-place by changing the end value, without a
 *	call to vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t llist, rlist, root;

	VM_MAP_ASSERT_LOCKED(map);
	root = map->root;
	root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_resize_free: resize_free object not mapped"));
	vm_map_splay_findnext(root, &rlist);
	root->right = NULL;
	map->root = vm_map_splay_merge(root, llist, rlist,
	    root->left, root->right);
	VM_MAP_ASSERT_CONSISTENT(map);
	CTR3(KTR_VM, "vm_map_entry_resize_free: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur, lbound;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL) {
		*entry = &map->header;
		return (FALSE);
	}
	if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	}
	if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		VM_MAP_ASSERT_CONSISTENT(map);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address < cur->start) {
			*entry = &map->header;
			return (FALSE);
		}
		*entry = cur;
		return (address < cur->end);
	}
	/*
	 * Since the map is only locked for read access, perform a
	 * standard binary search tree lookup for "address".
	 */
	lbound = &map->header;
	do {
		if (address < cur->start) {
			cur = cur->left;
		} else if (cur->end <= address) {
			lbound = cur;
			cur = cur->right;
		} else {
			*entry = cur;
			return (TRUE);
		}
	} while (cur != NULL);
	*entry = lbound;
	return (FALSE);
}
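
/*
 * A minimal caller sketch (illustrative only): with the map locked,
 * distinguish "address is mapped" from "address falls in a hole after
 * the returned entry":
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... addr is unmapped; entry precedes the hole ...
 *	}
 *	vm_map_unlock_read(map);
 */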

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) ||
	    start >= end)
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (prev_entry->next->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_VN_EXEC)
		protoeflags |= MAP_ENTRY_VN_EXEC;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
	    protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
	    MAP_VN_EXEC)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_simplify_entry(map, new_entry);

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "max_free" is the maximum amount of
 *	contiguous free space between an entry in its subtree and a
 *	neighbor of that entry.  This allows finding a free region in
 *	one path down the tree, so O(log n) amortized with splay
 *	trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: starting address if sufficient space,
 *		 vm_map_max(map)-length+1 if insufficient space.
 */
vm_offset_t
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
{
	vm_map_entry_t llist, rlist, root, y;
	vm_size_t left_length;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	start = MAX(start, vm_map_min(map));
	if (start + length > vm_map_max(map) || start + length < start)
		return (vm_map_max(map) - length + 1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL)
		return (start);

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	root = vm_map_splay_split(start, length, map->root,
	    &llist, &rlist);
	if (root != NULL)
		start = root->end;
	else if (rlist != NULL) {
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		root = llist;
		llist = root->right;
		root->right = NULL;
	}
	map->root = vm_map_splay_merge(root, llist, rlist,
	    root->left, root->right);
	VM_MAP_ASSERT_CONSISTENT(map);
	if (start + length <= root->start)
		return (start);

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	if (root->right == NULL &&
	    start + length <= vm_map_max(map))
		return (start);

	/* With max_free, can immediately tell if no solution. */
	if (root->right == NULL || length > root->right->max_free)
		return (vm_map_max(map) - length + 1);

	/*
	 * Splay for the least large-enough gap in the right subtree.
	 */
	llist = NULL;
	rlist = NULL;
	for (left_length = 0; ;
	    left_length = root->left != NULL ?
	    root->left->max_free : root->start - llist->end) {
		if (length <= left_length)
			SPLAY_LEFT_STEP(root, y, rlist,
			    length <= (y->left != NULL ?
			    y->left->max_free : y->start - llist->end));
		else
			SPLAY_RIGHT_STEP(root, y, llist,
			    length > (y->left != NULL ?
			    y->left->max_free : y->start - root->end));
		if (root == NULL)
			break;
	}
	root = llist;
	llist = root->right;
	if ((y = rlist) == NULL)
		root->right = NULL;
	else {
		rlist = y->left;
		y->left = NULL;
		root->right = y->right;
	}
	root = vm_map_splay_merge(root, llist, rlist,
	    root->left, root->right);
	if (y != NULL) {
		y->right = root->right;
		vm_map_entry_set_max_free(y);
		root->right = y;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root->end);
}
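
/*
 * A sketch of how the two primitives above compose (illustrative
 * only); vm_map_find() below implements a more complete version of
 * this sequence, with alignment and ASLR handling layered on top:
 *
 *	vm_map_lock(map);
 *	start = vm_map_findspace(map, start, length);
 *	if (start + length > vm_map_max(map))
 *		rv = KERN_NO_SPACE;
 *	else
 *		rv = vm_map_insert(map, object, offset, start,
 *		    start + length, prot, max, cow);
 *	vm_map_unlock(map);
 */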

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}

static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
static const int aslr_pages_rnd_32[2] = {0x100, 0x4};

static int cluster_anon = 1;
SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
    &cluster_anon, 0,
    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");

static bool
clustering_anon_allowed(vm_offset_t addr)
{

	switch (cluster_anon) {
	case 0:
		return (false);
	case 1:
		return (addr == 0);
	case 2:
	default:
		return (true);
	}
}

static long aslr_restarts;
SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
    &aslr_restarts, 0,
    "Number of aslr failures");

#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)

/*
 * Searches for the specified amount of free space in the given map with the
 * specified alignment.  Performs an address-ordered, first-fit search from
 * the given address "*addr", with an optional upper bound "max_addr".  If the
 * parameter "alignment" is zero, then the alignment is computed from the
 * given (object, offset) pair so as to enable the greatest possible use of
 * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 *
 * The map must be locked.  Initially, there must be at least "length" bytes
 * of free space at the given address.
 */
static int
vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
    vm_offset_t alignment)
{
	vm_offset_t aligned_addr, free_addr;

	VM_MAP_ASSERT_LOCKED(map);
	free_addr = *addr;
	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
	    ("caller failed to provide space %d at address %p",
	    (int)length, (void*)free_addr));
	for (;;) {
		/*
		 * At the start of every iteration, the free space at address
		 * "*addr" is at least "length" bytes.
		 */
		if (alignment == 0)
			pmap_align_superpage(object, offset, addr, length);
		else if ((*addr & (alignment - 1)) != 0) {
			*addr &= ~(alignment - 1);
			*addr += alignment;
		}
		aligned_addr = *addr;
		if (aligned_addr == free_addr) {
			/*
			 * Alignment did not change "*addr", so "*addr" must
			 * still provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}

		/*
		 * Test for address wrap on "*addr".  A wrapped "*addr" could
		 * be a valid address, in which case vm_map_findspace() cannot
		 * be relied upon to fail.
		 */
		if (aligned_addr < free_addr)
			return (KERN_NO_SPACE);
		*addr = vm_map_findspace(map, aligned_addr, length);
		if (*addr + length > vm_map_max(map) ||
		    (max_addr != 0 && *addr + length > max_addr))
			return (KERN_NO_SPACE);
		free_addr = *addr;
		if (free_addr == aligned_addr) {
			/*
			 * If a successful call to vm_map_findspace() did not
			 * change "*addr", then "*addr" must still be aligned
			 * and provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}
	}
}
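
/*
 * A worked example of the round-up step above (illustrative numbers
 * only): with *addr = 0x12345 and alignment = 0x1000, the mask step
 * yields 0x12000 and the subsequent addition yields 0x13000, the next
 * multiple of the alignment at or above the original address.
 */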

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, vm_offset_t max_addr, int find_space,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, curr_min_addr, min_addr;
	int gap, pidx, rv, try;
	bool cluster, en_aslr, update_anon;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	en_aslr = (map->flags & MAP_ASLR) != 0;
	update_anon = cluster = clustering_anon_allowed(*addr) &&
	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
	    find_space != VMFS_NO_SPACE && object == NULL &&
	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
	curr_min_addr = min_addr = *addr;
	if (en_aslr && min_addr == 0 && !cluster &&
	    find_space != VMFS_NO_SPACE &&
	    (map->flags & MAP_ASLR_IGNSTART) != 0)
		curr_min_addr = min_addr = vm_map_min(map);
	try = 0;
	vm_map_lock(map);
	if (cluster) {
		curr_min_addr = map->anon_loc;
		if (curr_min_addr == 0)
			cluster = false;
	}
	if (find_space != VMFS_NO_SPACE) {
		KASSERT(find_space == VMFS_ANY_SPACE ||
		    find_space == VMFS_OPTIMAL_SPACE ||
		    find_space == VMFS_SUPER_SPACE ||
		    alignment != 0, ("unexpected VMFS flag"));
again:
		/*
		 * When creating an anonymous mapping, try clustering
		 * with an existing anonymous mapping first.
		 *
		 * We make up to two attempts to find address space
		 * for a given find_space value.  The first attempt may
		 * apply randomization or may cluster with an existing
		 * anonymous mapping.  If this first attempt fails,
		 * perform a first-fit search of the available address
		 * space.
		 *
		 * If all tries failed, and find_space is
		 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE.
		 * Again enable clustering and randomization.
		 */
		try++;
		MPASS(try <= 2);

		if (try == 2) {
			/*
			 * Second try: we failed either to find a
			 * suitable region for randomizing the
			 * allocation, or to cluster with an existing
			 * mapping.  Retry with free run.
			 */
			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
			    vm_map_min(map) : min_addr;
			atomic_add_long(&aslr_restarts, 1);
		}

		if (try == 1 && en_aslr && !cluster) {
			/*
			 * Find space for allocation, including
			 * gap needed for later randomization.
			 */
			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
			    (find_space == VMFS_SUPER_SPACE || find_space ==
			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
			*addr = vm_map_findspace(map, curr_min_addr,
			    length + gap * pagesizes[pidx]);
			if (*addr + length + gap * pagesizes[pidx] >
			    vm_map_max(map))
				goto again;
			/* And randomize the start address. */
			*addr += (arc4random() % gap) * pagesizes[pidx];
			if (max_addr != 0 && *addr + length > max_addr)
				goto again;
		} else {
			*addr = vm_map_findspace(map, curr_min_addr, length);
			if (*addr + length > vm_map_max(map) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				if (cluster) {
					cluster = false;
					MPASS(try == 1);
					goto again;
				}
				rv = KERN_NO_SPACE;
				goto done;
			}
		}

		if (find_space != VMFS_ANY_SPACE &&
		    (rv = vm_map_alignspace(map, object, offset, addr, length,
		    max_addr, alignment)) != KERN_SUCCESS) {
			if (find_space == VMFS_OPTIMAL_SPACE) {
				find_space = VMFS_ANY_SPACE;
				curr_min_addr = min_addr;
				cluster = update_anon;
				try = 0;
				goto again;
			}
			goto done;
		}
	} else if ((cow & MAP_REMAP) != 0) {
		if (*addr < vm_map_min(map) ||
		    *addr + length > vm_map_max(map) ||
		    *addr + length <= length) {
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		vm_map_delete(map, *addr, *addr + length);
	}
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
		    max, cow);
	} else {
		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
		    prot, max, cow);
	}
	if (rv == KERN_SUCCESS && update_anon)
		map->anon_loc = *addr + length;
done:
	vm_map_unlock(map);
	return (rv);
}

/*
 * vm_map_find_min() is a variant of vm_map_find() that takes an
 * additional parameter (min_addr) and treats the given address
 * (*addr) differently.  Specifically, it treats *addr as a hint
 * and not as the minimum address where the mapping is created.
 *
 * This function works in two phases.  First, it tries to
 * allocate above the hint.  If that fails and the hint is
If that fails and the hint is 1959 * greater than min_addr, it performs a second pass, replacing 1960 * the hint with min_addr as the minimum address for the 1961 * allocation. 1962 */ 1963 int 1964 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1965 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr, 1966 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 1967 int cow) 1968 { 1969 vm_offset_t hint; 1970 int rv; 1971 1972 hint = *addr; 1973 for (;;) { 1974 rv = vm_map_find(map, object, offset, addr, length, max_addr, 1975 find_space, prot, max, cow); 1976 if (rv == KERN_SUCCESS || min_addr >= hint) 1977 return (rv); 1978 *addr = hint = min_addr; 1979 } 1980 } 1981 1982 /* 1983 * A map entry with any of the following flags set must not be merged with 1984 * another entry. 1985 */ 1986 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \ 1987 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC) 1988 1989 static bool 1990 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 1991 { 1992 1993 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 1994 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 1995 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 1996 prev, entry)); 1997 return (prev->end == entry->start && 1998 prev->object.vm_object == entry->object.vm_object && 1999 (prev->object.vm_object == NULL || 2000 prev->offset + (prev->end - prev->start) == entry->offset) && 2001 prev->eflags == entry->eflags && 2002 prev->protection == entry->protection && 2003 prev->max_protection == entry->max_protection && 2004 prev->inheritance == entry->inheritance && 2005 prev->wired_count == entry->wired_count && 2006 prev->cred == entry->cred); 2007 } 2008 2009 static void 2010 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 2011 { 2012 2013 /* 2014 * If the backing object is a vnode object, vm_object_deallocate() 2015 * calls vrele(). However, vrele() does not lock the vnode because 2016 * the vnode has additional references. Thus, the map lock can be 2017 * kept without causing a lock-order reversal with the vnode lock. 2018 * 2019 * Since we count the number of virtual page mappings in 2020 * object->un_pager.vnp.writemappings, the writemappings value 2021 * should not be adjusted when the entry is disposed of. 2022 */ 2023 if (entry->object.vm_object != NULL) 2024 vm_object_deallocate(entry->object.vm_object); 2025 if (entry->cred != NULL) 2026 crfree(entry->cred); 2027 vm_map_entry_dispose(map, entry); 2028 } 2029 2030 /* 2031 * vm_map_simplify_entry: 2032 * 2033 * Simplify the given map entry by merging with either neighbor. This 2034 * routine also has the ability to merge with both neighbors. 2035 * 2036 * The map must be locked. 2037 * 2038 * This routine guarantees that the passed entry remains valid (though 2039 * possibly extended). When merging, this routine may delete one or 2040 * both neighbors. 
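 *
 * A small illustration (hypothetical addresses): two adjacent
 * anonymous entries [0x10000, 0x20000) and [0x20000, 0x30000) that
 * share one object at contiguous offsets, with identical protection,
 * inheritance, wiring, and credentials, collapse into the single
 * entry [0x10000, 0x30000).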
2041 */ 2042 void 2043 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 2044 { 2045 vm_map_entry_t next, prev; 2046 2047 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0) 2048 return; 2049 prev = entry->prev; 2050 if (vm_map_mergeable_neighbors(prev, entry)) { 2051 vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT); 2052 vm_map_merged_neighbor_dispose(map, prev); 2053 } 2054 next = entry->next; 2055 if (vm_map_mergeable_neighbors(entry, next)) { 2056 vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV); 2057 vm_map_merged_neighbor_dispose(map, next); 2058 } 2059 } 2060 2061 /* 2062 * vm_map_clip_start: [ internal use only ] 2063 * 2064 * Asserts that the given entry begins at or after 2065 * the specified address; if necessary, 2066 * it splits the entry into two. 2067 */ 2068 #define vm_map_clip_start(map, entry, startaddr) \ 2069 { \ 2070 if (startaddr > entry->start) \ 2071 _vm_map_clip_start(map, entry, startaddr); \ 2072 } 2073 2074 /* 2075 * This routine is called only when it is known that 2076 * the entry must be split. 2077 */ 2078 static void 2079 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 2080 { 2081 vm_map_entry_t new_entry; 2082 2083 VM_MAP_ASSERT_LOCKED(map); 2084 KASSERT(entry->end > start && entry->start < start, 2085 ("_vm_map_clip_start: invalid clip of entry %p", entry)); 2086 2087 /* 2088 * Split off the front portion -- note that we must insert the new 2089 * entry BEFORE this one, so that this entry has the specified 2090 * starting address. 2091 */ 2092 vm_map_simplify_entry(map, entry); 2093 2094 /* 2095 * If there is no object backing this entry, we might as well create 2096 * one now. If we defer it, an object can get created after the map 2097 * is clipped, and individual objects will be created for the split-up 2098 * map. This is a bit of a hack, but is also about the best place to 2099 * put this improvement. 2100 */ 2101 if (entry->object.vm_object == NULL && !map->system_map && 2102 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 2103 vm_object_t object; 2104 object = vm_object_allocate(OBJT_DEFAULT, 2105 atop(entry->end - entry->start)); 2106 entry->object.vm_object = object; 2107 entry->offset = 0; 2108 if (entry->cred != NULL) { 2109 object->cred = entry->cred; 2110 object->charge = entry->end - entry->start; 2111 entry->cred = NULL; 2112 } 2113 } else if (entry->object.vm_object != NULL && 2114 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2115 entry->cred != NULL) { 2116 VM_OBJECT_WLOCK(entry->object.vm_object); 2117 KASSERT(entry->object.vm_object->cred == NULL, 2118 ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); 2119 entry->object.vm_object->cred = entry->cred; 2120 entry->object.vm_object->charge = entry->end - entry->start; 2121 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2122 entry->cred = NULL; 2123 } 2124 2125 new_entry = vm_map_entry_create(map); 2126 *new_entry = *entry; 2127 2128 new_entry->end = start; 2129 entry->offset += (start - entry->start); 2130 entry->start = start; 2131 if (new_entry->cred != NULL) 2132 crhold(entry->cred); 2133 2134 vm_map_entry_link(map, new_entry); 2135 2136 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2137 vm_object_reference(new_entry->object.vm_object); 2138 vm_map_entry_set_vnode_text(new_entry, true); 2139 /* 2140 * The object->un_pager.vnp.writemappings for the 2141 * object of MAP_ENTRY_VN_WRITECNT type entry shall be 2142 * kept as is here. The virtual pages are 2143 * re-distributed among the clipped entries, so the sum is 2144 * left the same. 
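 * For example, clipping a 16-page writeable vnode mapping into a
 * 4-page and a 12-page entry leaves writemappings at 16; the count
 * is merely spread across the two entries.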
2145 */ 2146 } 2147 } 2148 2149 /* 2150 * vm_map_clip_end: [ internal use only ] 2151 * 2152 * Asserts that the given entry ends at or before 2153 * the specified address; if necessary, 2154 * it splits the entry into two. 2155 */ 2156 #define vm_map_clip_end(map, entry, endaddr) \ 2157 { \ 2158 if ((endaddr) < (entry->end)) \ 2159 _vm_map_clip_end((map), (entry), (endaddr)); \ 2160 } 2161 2162 /* 2163 * This routine is called only when it is known that 2164 * the entry must be split. 2165 */ 2166 static void 2167 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 2168 { 2169 vm_map_entry_t new_entry; 2170 2171 VM_MAP_ASSERT_LOCKED(map); 2172 KASSERT(entry->start < end && entry->end > end, 2173 ("_vm_map_clip_end: invalid clip of entry %p", entry)); 2174 2175 /* 2176 * If there is no object backing this entry, we might as well create 2177 * one now. If we defer it, an object can get created after the map 2178 * is clipped, and individual objects will be created for the split-up 2179 * map. This is a bit of a hack, but is also about the best place to 2180 * put this improvement. 2181 */ 2182 if (entry->object.vm_object == NULL && !map->system_map && 2183 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 2184 vm_object_t object; 2185 object = vm_object_allocate(OBJT_DEFAULT, 2186 atop(entry->end - entry->start)); 2187 entry->object.vm_object = object; 2188 entry->offset = 0; 2189 if (entry->cred != NULL) { 2190 object->cred = entry->cred; 2191 object->charge = entry->end - entry->start; 2192 entry->cred = NULL; 2193 } 2194 } else if (entry->object.vm_object != NULL && 2195 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2196 entry->cred != NULL) { 2197 VM_OBJECT_WLOCK(entry->object.vm_object); 2198 KASSERT(entry->object.vm_object->cred == NULL, 2199 ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); 2200 entry->object.vm_object->cred = entry->cred; 2201 entry->object.vm_object->charge = entry->end - entry->start; 2202 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2203 entry->cred = NULL; 2204 } 2205 2206 /* 2207 * Create a new entry and insert it AFTER the specified entry 2208 */ 2209 new_entry = vm_map_entry_create(map); 2210 *new_entry = *entry; 2211 2212 new_entry->start = entry->end = end; 2213 new_entry->offset += (end - entry->start); 2214 if (new_entry->cred != NULL) 2215 crhold(entry->cred); 2216 2217 vm_map_entry_link(map, new_entry); 2218 2219 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2220 vm_object_reference(new_entry->object.vm_object); 2221 vm_map_entry_set_vnode_text(new_entry, true); 2222 } 2223 } 2224 2225 /* 2226 * vm_map_submap: [ kernel use only ] 2227 * 2228 * Mark the given range as handled by a subordinate map. 2229 * 2230 * This range must have been created with vm_map_find, 2231 * and no other operations may have been performed on this 2232 * range prior to calling vm_map_submap. 2233 * 2234 * Only a limited number of operations can be performed 2235 * within this range after calling vm_map_submap: 2236 * vm_fault 2237 * [Don't try vm_map_copy!] 2238 * 2239 * To remove a submapping, one must first remove the 2240 * range from the superior map, and then destroy the 2241 * submap (if desired). [Better yet, don't try it.]
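 *
 * A minimal usage sketch (hypothetical kernel code; parent_map,
 * submap, and size are placeholders, and error handling is elided):
 *
 *	vm_offset_t addr = 0;
 *
 *	if (vm_map_find(parent_map, NULL, 0, &addr, size, 0,
 *	    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) == KERN_SUCCESS)
 *		(void)vm_map_submap(parent_map, addr, addr + size, submap);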
2242 */ 2243 int 2244 vm_map_submap( 2245 vm_map_t map, 2246 vm_offset_t start, 2247 vm_offset_t end, 2248 vm_map_t submap) 2249 { 2250 vm_map_entry_t entry; 2251 int result; 2252 2253 result = KERN_INVALID_ARGUMENT; 2254 2255 vm_map_lock(submap); 2256 submap->flags |= MAP_IS_SUB_MAP; 2257 vm_map_unlock(submap); 2258 2259 vm_map_lock(map); 2260 2261 VM_MAP_RANGE_CHECK(map, start, end); 2262 2263 if (vm_map_lookup_entry(map, start, &entry)) { 2264 vm_map_clip_start(map, entry, start); 2265 } else 2266 entry = entry->next; 2267 2268 vm_map_clip_end(map, entry, end); 2269 2270 if ((entry->start == start) && (entry->end == end) && 2271 ((entry->eflags & MAP_ENTRY_COW) == 0) && 2272 (entry->object.vm_object == NULL)) { 2273 entry->object.sub_map = submap; 2274 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 2275 result = KERN_SUCCESS; 2276 } 2277 vm_map_unlock(map); 2278 2279 if (result != KERN_SUCCESS) { 2280 vm_map_lock(submap); 2281 submap->flags &= ~MAP_IS_SUB_MAP; 2282 vm_map_unlock(submap); 2283 } 2284 return (result); 2285 } 2286 2287 /* 2288 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 2289 */ 2290 #define MAX_INIT_PT 96 2291 2292 /* 2293 * vm_map_pmap_enter: 2294 * 2295 * Preload the specified map's pmap with mappings to the specified 2296 * object's memory-resident pages. No further physical pages are 2297 * allocated, and no further virtual pages are retrieved from secondary 2298 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 2299 * limited number of page mappings are created at the low-end of the 2300 * specified address range. (For this purpose, a superpage mapping 2301 * counts as one page mapping.) Otherwise, all resident pages within 2302 * the specified address range are mapped. 2303 */ 2304 static void 2305 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2306 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2307 { 2308 vm_offset_t start; 2309 vm_page_t p, p_start; 2310 vm_pindex_t mask, psize, threshold, tmpidx; 2311 2312 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2313 return; 2314 VM_OBJECT_RLOCK(object); 2315 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2316 VM_OBJECT_RUNLOCK(object); 2317 VM_OBJECT_WLOCK(object); 2318 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2319 pmap_object_init_pt(map->pmap, addr, object, pindex, 2320 size); 2321 VM_OBJECT_WUNLOCK(object); 2322 return; 2323 } 2324 VM_OBJECT_LOCK_DOWNGRADE(object); 2325 } 2326 2327 psize = atop(size); 2328 if (psize + pindex > object->size) { 2329 if (object->size < pindex) { 2330 VM_OBJECT_RUNLOCK(object); 2331 return; 2332 } 2333 psize = object->size - pindex; 2334 } 2335 2336 start = 0; 2337 p_start = NULL; 2338 threshold = MAX_INIT_PT; 2339 2340 p = vm_page_find_least(object, pindex); 2341 /* 2342 * Assert: the variable p is either (1) the page with the 2343 * least pindex greater than or equal to the parameter pindex 2344 * or (2) NULL. 2345 */ 2346 for (; 2347 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2348 p = TAILQ_NEXT(p, listq)) { 2349 /* 2350 * don't allow a madvise call to blow away our really 2351 * free pages by allocating pv entries.
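 * (Each mapping entered below typically consumes a pv entry, so the
 * prefault loop is cut short once the free page count becomes
 * severe.)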
2352 */ 2353 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2354 vm_page_count_severe()) || 2355 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2356 tmpidx >= threshold)) { 2357 psize = tmpidx; 2358 break; 2359 } 2360 if (p->valid == VM_PAGE_BITS_ALL) { 2361 if (p_start == NULL) { 2362 start = addr + ptoa(tmpidx); 2363 p_start = p; 2364 } 2365 /* Jump ahead if a superpage mapping is possible. */ 2366 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2367 (pagesizes[p->psind] - 1)) == 0) { 2368 mask = atop(pagesizes[p->psind]) - 1; 2369 if (tmpidx + mask < psize && 2370 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2371 p += mask; 2372 threshold += mask; 2373 } 2374 } 2375 } else if (p_start != NULL) { 2376 pmap_enter_object(map->pmap, start, addr + 2377 ptoa(tmpidx), p_start, prot); 2378 p_start = NULL; 2379 } 2380 } 2381 if (p_start != NULL) 2382 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2383 p_start, prot); 2384 VM_OBJECT_RUNLOCK(object); 2385 } 2386 2387 /* 2388 * vm_map_protect: 2389 * 2390 * Sets the protection of the specified address 2391 * region in the target map. If "set_max" is 2392 * specified, the maximum protection is to be set; 2393 * otherwise, only the current protection is affected. 2394 */ 2395 int 2396 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2397 vm_prot_t new_prot, boolean_t set_max) 2398 { 2399 vm_map_entry_t current, entry, in_tran; 2400 vm_object_t obj; 2401 struct ucred *cred; 2402 vm_prot_t old_prot; 2403 2404 if (start == end) 2405 return (KERN_SUCCESS); 2406 2407 again: 2408 in_tran = NULL; 2409 vm_map_lock(map); 2410 2411 /* 2412 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2413 * need to fault pages into the map and will drop the map lock while 2414 * doing so, and the VM object may end up in an inconsistent state if we 2415 * update the protection on the map entry in between faults. 2416 */ 2417 vm_map_wait_busy(map); 2418 2419 VM_MAP_RANGE_CHECK(map, start, end); 2420 2421 if (vm_map_lookup_entry(map, start, &entry)) { 2422 vm_map_clip_start(map, entry, start); 2423 } else { 2424 entry = entry->next; 2425 } 2426 2427 /* 2428 * Make a first pass to check for protection violations. 2429 */ 2430 for (current = entry; current->start < end; current = current->next) { 2431 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2432 continue; 2433 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2434 vm_map_unlock(map); 2435 return (KERN_INVALID_ARGUMENT); 2436 } 2437 if ((new_prot & current->max_protection) != new_prot) { 2438 vm_map_unlock(map); 2439 return (KERN_PROTECTION_FAILURE); 2440 } 2441 if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2442 in_tran = current; 2443 } 2444 2445 /* 2446 * Postpone the operation until all in-transition map entries 2447 * have stabilized. An in-transition entry might already have 2448 * its pages wired and its wired_count incremented, but not yet 2449 * have MAP_ENTRY_USER_WIRED set; because the map lock is 2450 * dropped, that state is visible to other threads. In this 2451 * case we would miss our call to vm_fault_copy_entry(). 2452 */ 2453 if (in_tran != NULL) { 2454 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2455 vm_map_unlock_and_wait(map, 0); 2456 goto again; 2457 } 2458 2459 /* 2460 * Do an accounting pass for private read-only mappings that 2461 * will now be copy-on-write due to newly allowed writes (e.g.
a debugger setting a 2462 * breakpoint in the text segment) 2463 */ 2464 for (current = entry; current->start < end; current = current->next) { 2465 2466 vm_map_clip_end(map, current, end); 2467 2468 if (set_max || 2469 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2470 ENTRY_CHARGED(current) || 2471 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2472 continue; 2473 } 2474 2475 cred = curthread->td_ucred; 2476 obj = current->object.vm_object; 2477 2478 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2479 if (!swap_reserve(current->end - current->start)) { 2480 vm_map_unlock(map); 2481 return (KERN_RESOURCE_SHORTAGE); 2482 } 2483 crhold(cred); 2484 current->cred = cred; 2485 continue; 2486 } 2487 2488 VM_OBJECT_WLOCK(obj); 2489 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2490 VM_OBJECT_WUNLOCK(obj); 2491 continue; 2492 } 2493 2494 /* 2495 * Charge for the whole object allocation now, since 2496 * we cannot distinguish between non-charged and 2497 * charged clipped mapping of the same object later. 2498 */ 2499 KASSERT(obj->charge == 0, 2500 ("vm_map_protect: object %p overcharged (entry %p)", 2501 obj, current)); 2502 if (!swap_reserve(ptoa(obj->size))) { 2503 VM_OBJECT_WUNLOCK(obj); 2504 vm_map_unlock(map); 2505 return (KERN_RESOURCE_SHORTAGE); 2506 } 2507 2508 crhold(cred); 2509 obj->cred = cred; 2510 obj->charge = ptoa(obj->size); 2511 VM_OBJECT_WUNLOCK(obj); 2512 } 2513 2514 /* 2515 * Go back and fix up protections. [Note that clipping is not 2516 * necessary the second time.] 2517 */ 2518 for (current = entry; current->start < end; current = current->next) { 2519 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2520 continue; 2521 2522 old_prot = current->protection; 2523 2524 if (set_max) 2525 current->protection = 2526 (current->max_protection = new_prot) & 2527 old_prot; 2528 else 2529 current->protection = new_prot; 2530 2531 /* 2532 * For user wired map entries, the normal lazy evaluation of 2533 * write access upgrades through soft page faults is 2534 * undesirable. Instead, immediately copy any pages that are 2535 * copy-on-write and enable write access in the physical map. 2536 */ 2537 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2538 (current->protection & VM_PROT_WRITE) != 0 && 2539 (old_prot & VM_PROT_WRITE) == 0) 2540 vm_fault_copy_entry(map, map, current, current, NULL); 2541 2542 /* 2543 * When restricting access, update the physical map. Worry 2544 * about copy-on-write here. 2545 */ 2546 if ((old_prot & ~current->protection) != 0) { 2547 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2548 VM_PROT_ALL) 2549 pmap_protect(map->pmap, current->start, 2550 current->end, 2551 current->protection & MASK(current)); 2552 #undef MASK 2553 } 2554 vm_map_simplify_entry(map, current); 2555 } 2556 vm_map_unlock(map); 2557 return (KERN_SUCCESS); 2558 } 2559 2560 /* 2561 * vm_map_madvise: 2562 * 2563 * This routine traverses a process's map, handling the madvise 2564 * system call. Advisories are classified as either those affecting 2565 * the vm_map_entry structure or those affecting the underlying 2566 * objects. 2567 */ 2568 int 2569 vm_map_madvise( 2570 vm_map_t map, 2571 vm_offset_t start, 2572 vm_offset_t end, 2573 int behav) 2574 { 2575 vm_map_entry_t current, entry; 2576 bool modify_map; 2577 2578 /* 2579 * Some madvise calls directly modify the vm_map_entry, in which case 2580 * we need to use an exclusive lock on the map and we need to perform 2581 * various clipping operations.
Otherwise we only need a read-lock 2582 * on the map. 2583 */ 2584 switch(behav) { 2585 case MADV_NORMAL: 2586 case MADV_SEQUENTIAL: 2587 case MADV_RANDOM: 2588 case MADV_NOSYNC: 2589 case MADV_AUTOSYNC: 2590 case MADV_NOCORE: 2591 case MADV_CORE: 2592 if (start == end) 2593 return (0); 2594 modify_map = true; 2595 vm_map_lock(map); 2596 break; 2597 case MADV_WILLNEED: 2598 case MADV_DONTNEED: 2599 case MADV_FREE: 2600 if (start == end) 2601 return (0); 2602 modify_map = false; 2603 vm_map_lock_read(map); 2604 break; 2605 default: 2606 return (EINVAL); 2607 } 2608 2609 /* 2610 * Locate starting entry and clip if necessary. 2611 */ 2612 VM_MAP_RANGE_CHECK(map, start, end); 2613 2614 if (vm_map_lookup_entry(map, start, &entry)) { 2615 if (modify_map) 2616 vm_map_clip_start(map, entry, start); 2617 } else { 2618 entry = entry->next; 2619 } 2620 2621 if (modify_map) { 2622 /* 2623 * madvise behaviors that are implemented in the vm_map_entry. 2624 * 2625 * We clip the vm_map_entry so that behavioral changes are 2626 * limited to the specified address range. 2627 */ 2628 for (current = entry; current->start < end; 2629 current = current->next) { 2630 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2631 continue; 2632 2633 vm_map_clip_end(map, current, end); 2634 2635 switch (behav) { 2636 case MADV_NORMAL: 2637 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2638 break; 2639 case MADV_SEQUENTIAL: 2640 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2641 break; 2642 case MADV_RANDOM: 2643 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2644 break; 2645 case MADV_NOSYNC: 2646 current->eflags |= MAP_ENTRY_NOSYNC; 2647 break; 2648 case MADV_AUTOSYNC: 2649 current->eflags &= ~MAP_ENTRY_NOSYNC; 2650 break; 2651 case MADV_NOCORE: 2652 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2653 break; 2654 case MADV_CORE: 2655 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2656 break; 2657 default: 2658 break; 2659 } 2660 vm_map_simplify_entry(map, current); 2661 } 2662 vm_map_unlock(map); 2663 } else { 2664 vm_pindex_t pstart, pend; 2665 2666 /* 2667 * madvise behaviors that are implemented in the underlying 2668 * vm_object. 2669 * 2670 * Since we don't clip the vm_map_entry, we have to clip 2671 * the vm_object pindex and count. 2672 */ 2673 for (current = entry; current->start < end; 2674 current = current->next) { 2675 vm_offset_t useEnd, useStart; 2676 2677 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2678 continue; 2679 2680 pstart = OFF_TO_IDX(current->offset); 2681 pend = pstart + atop(current->end - current->start); 2682 useStart = current->start; 2683 useEnd = current->end; 2684 2685 if (current->start < start) { 2686 pstart += atop(start - current->start); 2687 useStart = start; 2688 } 2689 if (current->end > end) { 2690 pend -= atop(current->end - end); 2691 useEnd = end; 2692 } 2693 2694 if (pstart >= pend) 2695 continue; 2696 2697 /* 2698 * Perform the pmap_advise() before clearing 2699 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2700 * concurrent pmap operation, such as pmap_remove(), 2701 * could clear a reference in the pmap and set 2702 * PGA_REFERENCED on the page before the pmap_advise() 2703 * had completed. Consequently, the page would appear 2704 * referenced based upon an old reference that 2705 * occurred before this pmap_advise() ran. 
2706 */ 2707 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2708 pmap_advise(map->pmap, useStart, useEnd, 2709 behav); 2710 2711 vm_object_madvise(current->object.vm_object, pstart, 2712 pend, behav); 2713 2714 /* 2715 * Pre-populate paging structures in the 2716 * WILLNEED case. For wired entries, the 2717 * paging structures are already populated. 2718 */ 2719 if (behav == MADV_WILLNEED && 2720 current->wired_count == 0) { 2721 vm_map_pmap_enter(map, 2722 useStart, 2723 current->protection, 2724 current->object.vm_object, 2725 pstart, 2726 ptoa(pend - pstart), 2727 MAP_PREFAULT_MADVISE 2728 ); 2729 } 2730 } 2731 vm_map_unlock_read(map); 2732 } 2733 return (0); 2734 } 2735 2736 2737 /* 2738 * vm_map_inherit: 2739 * 2740 * Sets the inheritance of the specified address 2741 * range in the target map. Inheritance 2742 * affects how the map will be shared with 2743 * child maps at the time of vmspace_fork. 2744 */ 2745 int 2746 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2747 vm_inherit_t new_inheritance) 2748 { 2749 vm_map_entry_t entry; 2750 vm_map_entry_t temp_entry; 2751 2752 switch (new_inheritance) { 2753 case VM_INHERIT_NONE: 2754 case VM_INHERIT_COPY: 2755 case VM_INHERIT_SHARE: 2756 case VM_INHERIT_ZERO: 2757 break; 2758 default: 2759 return (KERN_INVALID_ARGUMENT); 2760 } 2761 if (start == end) 2762 return (KERN_SUCCESS); 2763 vm_map_lock(map); 2764 VM_MAP_RANGE_CHECK(map, start, end); 2765 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2766 entry = temp_entry; 2767 vm_map_clip_start(map, entry, start); 2768 } else 2769 entry = temp_entry->next; 2770 while (entry->start < end) { 2771 vm_map_clip_end(map, entry, end); 2772 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2773 new_inheritance != VM_INHERIT_ZERO) 2774 entry->inheritance = new_inheritance; 2775 vm_map_simplify_entry(map, entry); 2776 entry = entry->next; 2777 } 2778 vm_map_unlock(map); 2779 return (KERN_SUCCESS); 2780 } 2781 2782 /* 2783 * vm_map_unwire: 2784 * 2785 * Implements both kernel and user unwiring. 2786 */ 2787 int 2788 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2789 int flags) 2790 { 2791 vm_map_entry_t entry, first_entry, tmp_entry; 2792 vm_offset_t saved_start; 2793 unsigned int last_timestamp; 2794 int rv; 2795 boolean_t need_wakeup, result, user_unwire; 2796 2797 if (start == end) 2798 return (KERN_SUCCESS); 2799 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2800 vm_map_lock(map); 2801 VM_MAP_RANGE_CHECK(map, start, end); 2802 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2803 if (flags & VM_MAP_WIRE_HOLESOK) 2804 first_entry = first_entry->next; 2805 else { 2806 vm_map_unlock(map); 2807 return (KERN_INVALID_ADDRESS); 2808 } 2809 } 2810 last_timestamp = map->timestamp; 2811 entry = first_entry; 2812 while (entry->start < end) { 2813 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2814 /* 2815 * We have not yet clipped the entry. 2816 */ 2817 saved_start = (start >= entry->start) ? start : 2818 entry->start; 2819 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2820 if (vm_map_unlock_and_wait(map, 0)) { 2821 /* 2822 * Allow interruption of user unwiring? 2823 */ 2824 } 2825 vm_map_lock(map); 2826 if (last_timestamp+1 != map->timestamp) { 2827 /* 2828 * Look again for the entry because the map was 2829 * modified while it was unlocked. 2830 * Specifically, the entry may have been 2831 * clipped, merged, or deleted. 
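 * For example, a concurrent munmap() may have removed the entry
 * entirely; the lookup below detects that case.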
2832 */ 2833 if (!vm_map_lookup_entry(map, saved_start, 2834 &tmp_entry)) { 2835 if (flags & VM_MAP_WIRE_HOLESOK) 2836 tmp_entry = tmp_entry->next; 2837 else { 2838 if (saved_start == start) { 2839 /* 2840 * First_entry has been deleted. 2841 */ 2842 vm_map_unlock(map); 2843 return (KERN_INVALID_ADDRESS); 2844 } 2845 end = saved_start; 2846 rv = KERN_INVALID_ADDRESS; 2847 goto done; 2848 } 2849 } 2850 if (entry == first_entry) 2851 first_entry = tmp_entry; 2852 else 2853 first_entry = NULL; 2854 entry = tmp_entry; 2855 } 2856 last_timestamp = map->timestamp; 2857 continue; 2858 } 2859 vm_map_clip_start(map, entry, start); 2860 vm_map_clip_end(map, entry, end); 2861 /* 2862 * Mark the entry in case the map lock is released. (See 2863 * above.) 2864 */ 2865 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2866 entry->wiring_thread == NULL, 2867 ("owned map entry %p", entry)); 2868 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2869 entry->wiring_thread = curthread; 2870 /* 2871 * Check the map for holes in the specified region. 2872 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2873 */ 2874 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2875 (entry->end < end && entry->next->start > entry->end)) { 2876 end = entry->end; 2877 rv = KERN_INVALID_ADDRESS; 2878 goto done; 2879 } 2880 /* 2881 * If system unwiring, require that the entry is system wired. 2882 */ 2883 if (!user_unwire && 2884 vm_map_entry_system_wired_count(entry) == 0) { 2885 end = entry->end; 2886 rv = KERN_INVALID_ARGUMENT; 2887 goto done; 2888 } 2889 entry = entry->next; 2890 } 2891 rv = KERN_SUCCESS; 2892 done: 2893 need_wakeup = FALSE; 2894 if (first_entry == NULL) { 2895 result = vm_map_lookup_entry(map, start, &first_entry); 2896 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2897 first_entry = first_entry->next; 2898 else 2899 KASSERT(result, ("vm_map_unwire: lookup failed")); 2900 } 2901 for (entry = first_entry; entry->start < end; entry = entry->next) { 2902 /* 2903 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2904 * space in the unwired region could have been mapped 2905 * while the map lock was dropped for draining 2906 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2907 * could be simultaneously wiring this new mapping 2908 * entry. Detect these cases and skip any entries 2909 * marked as in transition by us. 2910 */ 2911 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2912 entry->wiring_thread != curthread) { 2913 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2914 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2915 continue; 2916 } 2917 2918 if (rv == KERN_SUCCESS && (!user_unwire || 2919 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2920 if (user_unwire) 2921 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2922 if (entry->wired_count == 1) 2923 vm_map_entry_unwire(map, entry); 2924 else 2925 entry->wired_count--; 2926 } 2927 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2928 ("vm_map_unwire: in-transition flag missing %p", entry)); 2929 KASSERT(entry->wiring_thread == curthread, 2930 ("vm_map_unwire: alien wire %p", entry)); 2931 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2932 entry->wiring_thread = NULL; 2933 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2934 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2935 need_wakeup = TRUE; 2936 } 2937 vm_map_simplify_entry(map, entry); 2938 } 2939 vm_map_unlock(map); 2940 if (need_wakeup) 2941 vm_map_wakeup(map); 2942 return (rv); 2943 } 2944 2945 /* 2946 * vm_map_wire_entry_failure: 2947 * 2948 * Handle a wiring failure on the given entry. 
2949 * 2950 * The map should be locked. 2951 */ 2952 static void 2953 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2954 vm_offset_t failed_addr) 2955 { 2956 2957 VM_MAP_ASSERT_LOCKED(map); 2958 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2959 entry->wired_count == 1, 2960 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2961 KASSERT(failed_addr < entry->end, 2962 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2963 2964 /* 2965 * If any pages at the start of this entry were successfully wired, 2966 * then unwire them. 2967 */ 2968 if (failed_addr > entry->start) { 2969 pmap_unwire(map->pmap, entry->start, failed_addr); 2970 vm_object_unwire(entry->object.vm_object, entry->offset, 2971 failed_addr - entry->start, PQ_ACTIVE); 2972 } 2973 2974 /* 2975 * Assign an out-of-range value to represent the failure to wire this 2976 * entry. 2977 */ 2978 entry->wired_count = -1; 2979 } 2980 2981 /* 2982 * vm_map_wire: 2983 * 2984 * Implements both kernel and user wiring. 2985 */ 2986 int 2987 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2988 int flags) 2989 { 2990 vm_map_entry_t entry, first_entry, tmp_entry; 2991 vm_offset_t faddr, saved_end, saved_start; 2992 unsigned int last_timestamp; 2993 int rv; 2994 boolean_t need_wakeup, result, user_wire; 2995 vm_prot_t prot; 2996 2997 if (start == end) 2998 return (KERN_SUCCESS); 2999 prot = 0; 3000 if (flags & VM_MAP_WIRE_WRITE) 3001 prot |= VM_PROT_WRITE; 3002 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 3003 vm_map_lock(map); 3004 VM_MAP_RANGE_CHECK(map, start, end); 3005 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3006 if (flags & VM_MAP_WIRE_HOLESOK) 3007 first_entry = first_entry->next; 3008 else { 3009 vm_map_unlock(map); 3010 return (KERN_INVALID_ADDRESS); 3011 } 3012 } 3013 last_timestamp = map->timestamp; 3014 entry = first_entry; 3015 while (entry->start < end) { 3016 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3017 /* 3018 * We have not yet clipped the entry. 3019 */ 3020 saved_start = (start >= entry->start) ? start : 3021 entry->start; 3022 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3023 if (vm_map_unlock_and_wait(map, 0)) { 3024 /* 3025 * Allow interruption of user wiring? 3026 */ 3027 } 3028 vm_map_lock(map); 3029 if (last_timestamp + 1 != map->timestamp) { 3030 /* 3031 * Look again for the entry because the map was 3032 * modified while it was unlocked. 3033 * Specifically, the entry may have been 3034 * clipped, merged, or deleted. 3035 */ 3036 if (!vm_map_lookup_entry(map, saved_start, 3037 &tmp_entry)) { 3038 if (flags & VM_MAP_WIRE_HOLESOK) 3039 tmp_entry = tmp_entry->next; 3040 else { 3041 if (saved_start == start) { 3042 /* 3043 * first_entry has been deleted. 3044 */ 3045 vm_map_unlock(map); 3046 return (KERN_INVALID_ADDRESS); 3047 } 3048 end = saved_start; 3049 rv = KERN_INVALID_ADDRESS; 3050 goto done; 3051 } 3052 } 3053 if (entry == first_entry) 3054 first_entry = tmp_entry; 3055 else 3056 first_entry = NULL; 3057 entry = tmp_entry; 3058 } 3059 last_timestamp = map->timestamp; 3060 continue; 3061 } 3062 vm_map_clip_start(map, entry, start); 3063 vm_map_clip_end(map, entry, end); 3064 /* 3065 * Mark the entry in case the map lock is released. (See 3066 * above.) 
3067 */ 3068 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3069 entry->wiring_thread == NULL, 3070 ("owned map entry %p", entry)); 3071 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3072 entry->wiring_thread = curthread; 3073 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3074 || (entry->protection & prot) != prot) { 3075 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3076 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 3077 end = entry->end; 3078 rv = KERN_INVALID_ADDRESS; 3079 goto done; 3080 } 3081 goto next_entry; 3082 } 3083 if (entry->wired_count == 0) { 3084 entry->wired_count++; 3085 saved_start = entry->start; 3086 saved_end = entry->end; 3087 3088 /* 3089 * Release the map lock, relying on the in-transition 3090 * mark. Mark the map busy for fork. 3091 */ 3092 vm_map_busy(map); 3093 vm_map_unlock(map); 3094 3095 faddr = saved_start; 3096 do { 3097 /* 3098 * Simulate a fault to get the page and enter 3099 * it into the physical map. 3100 */ 3101 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 3102 VM_FAULT_WIRE)) != KERN_SUCCESS) 3103 break; 3104 } while ((faddr += PAGE_SIZE) < saved_end); 3105 vm_map_lock(map); 3106 vm_map_unbusy(map); 3107 if (last_timestamp + 1 != map->timestamp) { 3108 /* 3109 * Look again for the entry because the map was 3110 * modified while it was unlocked. The entry 3111 * may have been clipped, but NOT merged or 3112 * deleted. 3113 */ 3114 result = vm_map_lookup_entry(map, saved_start, 3115 &tmp_entry); 3116 KASSERT(result, ("vm_map_wire: lookup failed")); 3117 if (entry == first_entry) 3118 first_entry = tmp_entry; 3119 else 3120 first_entry = NULL; 3121 entry = tmp_entry; 3122 while (entry->end < saved_end) { 3123 /* 3124 * In case of failure, handle entries 3125 * that were not fully wired here; 3126 * fully wired entries are handled 3127 * later. 3128 */ 3129 if (rv != KERN_SUCCESS && 3130 faddr < entry->end) 3131 vm_map_wire_entry_failure(map, 3132 entry, faddr); 3133 entry = entry->next; 3134 } 3135 } 3136 last_timestamp = map->timestamp; 3137 if (rv != KERN_SUCCESS) { 3138 vm_map_wire_entry_failure(map, entry, faddr); 3139 end = entry->end; 3140 goto done; 3141 } 3142 } else if (!user_wire || 3143 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3144 entry->wired_count++; 3145 } 3146 /* 3147 * Check the map for holes in the specified region. 3148 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 3149 */ 3150 next_entry: 3151 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 3152 entry->end < end && entry->next->start > entry->end) { 3153 end = entry->end; 3154 rv = KERN_INVALID_ADDRESS; 3155 goto done; 3156 } 3157 entry = entry->next; 3158 } 3159 rv = KERN_SUCCESS; 3160 done: 3161 need_wakeup = FALSE; 3162 if (first_entry == NULL) { 3163 result = vm_map_lookup_entry(map, start, &first_entry); 3164 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 3165 first_entry = first_entry->next; 3166 else 3167 KASSERT(result, ("vm_map_wire: lookup failed")); 3168 } 3169 for (entry = first_entry; entry->start < end; entry = entry->next) { 3170 /* 3171 * If VM_MAP_WIRE_HOLESOK was specified, an empty 3172 * space in the unwired region could have been mapped 3173 * while the map lock was dropped for faulting in the 3174 * pages or draining MAP_ENTRY_IN_TRANSITION. 3175 * Moreover, another thread could be simultaneously 3176 * wiring this new mapping entry. Detect these cases 3177 * and skip any entries marked as in transition not by us. 
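 * (Such a wiring is started by, e.g., another thread invoking
 * mlock(2) on a range mapped into the hole while our lock was
 * dropped.)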
3178 */ 3179 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3180 entry->wiring_thread != curthread) { 3181 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 3182 ("vm_map_wire: !HOLESOK and new/changed entry")); 3183 continue; 3184 } 3185 3186 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 3187 goto next_entry_done; 3188 3189 if (rv == KERN_SUCCESS) { 3190 if (user_wire) 3191 entry->eflags |= MAP_ENTRY_USER_WIRED; 3192 } else if (entry->wired_count == -1) { 3193 /* 3194 * Wiring failed on this entry. Thus, unwiring is 3195 * unnecessary. 3196 */ 3197 entry->wired_count = 0; 3198 } else if (!user_wire || 3199 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3200 /* 3201 * Undo the wiring. Wiring succeeded on this entry 3202 * but failed on a later entry. 3203 */ 3204 if (entry->wired_count == 1) 3205 vm_map_entry_unwire(map, entry); 3206 else 3207 entry->wired_count--; 3208 } 3209 next_entry_done: 3210 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3211 ("vm_map_wire: in-transition flag missing %p", entry)); 3212 KASSERT(entry->wiring_thread == curthread, 3213 ("vm_map_wire: alien wire %p", entry)); 3214 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3215 MAP_ENTRY_WIRE_SKIPPED); 3216 entry->wiring_thread = NULL; 3217 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3218 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3219 need_wakeup = TRUE; 3220 } 3221 vm_map_simplify_entry(map, entry); 3222 } 3223 vm_map_unlock(map); 3224 if (need_wakeup) 3225 vm_map_wakeup(map); 3226 return (rv); 3227 } 3228 3229 /* 3230 * vm_map_sync 3231 * 3232 * Push any dirty cached pages in the address range to their pager. 3233 * If syncio is TRUE, dirty pages are written synchronously. 3234 * If invalidate is TRUE, any cached pages are freed as well. 3235 * 3236 * If the size of the region from start to end is zero, we are 3237 * supposed to flush all modified pages within the region containing 3238 * start. Unfortunately, a region can be split or coalesced with 3239 * neighboring regions, making it difficult to determine what the 3240 * original region was. Therefore, we approximate this requirement by 3241 * flushing the current region containing start. 3242 * 3243 * Returns an error if any part of the specified range is not mapped. 3244 */ 3245 int 3246 vm_map_sync( 3247 vm_map_t map, 3248 vm_offset_t start, 3249 vm_offset_t end, 3250 boolean_t syncio, 3251 boolean_t invalidate) 3252 { 3253 vm_map_entry_t current; 3254 vm_map_entry_t entry; 3255 vm_size_t size; 3256 vm_object_t object; 3257 vm_ooffset_t offset; 3258 unsigned int last_timestamp; 3259 boolean_t failed; 3260 3261 vm_map_lock_read(map); 3262 VM_MAP_RANGE_CHECK(map, start, end); 3263 if (!vm_map_lookup_entry(map, start, &entry)) { 3264 vm_map_unlock_read(map); 3265 return (KERN_INVALID_ADDRESS); 3266 } else if (start == end) { 3267 start = entry->start; 3268 end = entry->end; 3269 } 3270 /* 3271 * Make a first pass to check for user-wired memory and holes. 3272 */ 3273 for (current = entry; current->start < end; current = current->next) { 3274 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 3275 vm_map_unlock_read(map); 3276 return (KERN_INVALID_ARGUMENT); 3277 } 3278 if (end > current->end && 3279 current->end != current->next->start) { 3280 vm_map_unlock_read(map); 3281 return (KERN_INVALID_ADDRESS); 3282 } 3283 } 3284 3285 if (invalidate) 3286 pmap_remove(map->pmap, start, end); 3287 failed = FALSE; 3288 3289 /* 3290 * Make a second pass, cleaning/uncaching pages from the indicated 3291 * objects as we go. 
3292 */ 3293 for (current = entry; current->start < end;) { 3294 offset = current->offset + (start - current->start); 3295 size = (end <= current->end ? end : current->end) - start; 3296 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 3297 vm_map_t smap; 3298 vm_map_entry_t tentry; 3299 vm_size_t tsize; 3300 3301 smap = current->object.sub_map; 3302 vm_map_lock_read(smap); 3303 (void) vm_map_lookup_entry(smap, offset, &tentry); 3304 tsize = tentry->end - offset; 3305 if (tsize < size) 3306 size = tsize; 3307 object = tentry->object.vm_object; 3308 offset = tentry->offset + (offset - tentry->start); 3309 vm_map_unlock_read(smap); 3310 } else { 3311 object = current->object.vm_object; 3312 } 3313 vm_object_reference(object); 3314 last_timestamp = map->timestamp; 3315 vm_map_unlock_read(map); 3316 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 3317 failed = TRUE; 3318 start += size; 3319 vm_object_deallocate(object); 3320 vm_map_lock_read(map); 3321 if (last_timestamp == map->timestamp || 3322 !vm_map_lookup_entry(map, start, &current)) 3323 current = current->next; 3324 } 3325 3326 vm_map_unlock_read(map); 3327 return (failed ? KERN_FAILURE : KERN_SUCCESS); 3328 } 3329 3330 /* 3331 * vm_map_entry_unwire: [ internal use only ] 3332 * 3333 * Make the region specified by this entry pageable. 3334 * 3335 * The map in question should be locked. 3336 * [This is the reason for this routine's existence.] 3337 */ 3338 static void 3339 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3340 { 3341 3342 VM_MAP_ASSERT_LOCKED(map); 3343 KASSERT(entry->wired_count > 0, 3344 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 3345 pmap_unwire(map->pmap, entry->start, entry->end); 3346 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 3347 entry->start, PQ_ACTIVE); 3348 entry->wired_count = 0; 3349 } 3350 3351 static void 3352 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 3353 { 3354 3355 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3356 vm_object_deallocate(entry->object.vm_object); 3357 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3358 } 3359 3360 /* 3361 * vm_map_entry_delete: [ internal use only ] 3362 * 3363 * Deallocate the given entry from the target map.
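 *
 * As the code below shows, when no other mapping can reference the
 * affected pages (OBJ_ONEMAPPING, or the kernel object), their
 * resident pages and swap space are released eagerly rather than
 * when the object is finally destroyed.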
3364 */ 3365 static void 3366 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3367 { 3368 vm_object_t object; 3369 vm_pindex_t offidxstart, offidxend, count, size1; 3370 vm_size_t size; 3371 3372 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3373 object = entry->object.vm_object; 3374 3375 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3376 MPASS(entry->cred == NULL); 3377 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3378 MPASS(object == NULL); 3379 vm_map_entry_deallocate(entry, map->system_map); 3380 return; 3381 } 3382 3383 size = entry->end - entry->start; 3384 map->size -= size; 3385 3386 if (entry->cred != NULL) { 3387 swap_release_by_cred(size, entry->cred); 3388 crfree(entry->cred); 3389 } 3390 3391 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 3392 (object != NULL)) { 3393 KASSERT(entry->cred == NULL || object->cred == NULL || 3394 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3395 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3396 count = atop(size); 3397 offidxstart = OFF_TO_IDX(entry->offset); 3398 offidxend = offidxstart + count; 3399 VM_OBJECT_WLOCK(object); 3400 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 3401 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 3402 object == kernel_object)) { 3403 vm_object_collapse(object); 3404 3405 /* 3406 * The option OBJPR_NOTMAPPED can be passed here 3407 * because vm_map_delete() already performed 3408 * pmap_remove() on the only mapping to this range 3409 * of pages. 3410 */ 3411 vm_object_page_remove(object, offidxstart, offidxend, 3412 OBJPR_NOTMAPPED); 3413 if (object->type == OBJT_SWAP) 3414 swap_pager_freespace(object, offidxstart, 3415 count); 3416 if (offidxend >= object->size && 3417 offidxstart < object->size) { 3418 size1 = object->size; 3419 object->size = offidxstart; 3420 if (object->cred != NULL) { 3421 size1 -= object->size; 3422 KASSERT(object->charge >= ptoa(size1), 3423 ("object %p charge < 0", object)); 3424 swap_release_by_cred(ptoa(size1), 3425 object->cred); 3426 object->charge -= ptoa(size1); 3427 } 3428 } 3429 } 3430 VM_OBJECT_WUNLOCK(object); 3431 } else 3432 entry->object.vm_object = NULL; 3433 if (map->system_map) 3434 vm_map_entry_deallocate(entry, TRUE); 3435 else { 3436 entry->next = curthread->td_map_def_user; 3437 curthread->td_map_def_user = entry; 3438 } 3439 } 3440 3441 /* 3442 * vm_map_delete: [ internal use only ] 3443 * 3444 * Deallocates the given address range from the target 3445 * map. 3446 */ 3447 int 3448 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3449 { 3450 vm_map_entry_t entry; 3451 vm_map_entry_t first_entry; 3452 3453 VM_MAP_ASSERT_LOCKED(map); 3454 if (start == end) 3455 return (KERN_SUCCESS); 3456 3457 /* 3458 * Find the start of the region, and clip it 3459 */ 3460 if (!vm_map_lookup_entry(map, start, &first_entry)) 3461 entry = first_entry->next; 3462 else { 3463 entry = first_entry; 3464 vm_map_clip_start(map, entry, start); 3465 } 3466 3467 /* 3468 * Step through all entries in this region 3469 */ 3470 while (entry->start < end) { 3471 vm_map_entry_t next; 3472 3473 /* 3474 * Wait for wiring or unwiring of an entry to complete. 3475 * Also wait for any system wirings to disappear on 3476 * user maps. 
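 * (A system wiring is created by, e.g., a sysctl handler that wires
 * a user buffer; the deletion must not proceed until that wiring is
 * released.)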
3477 */ 3478 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3479 (vm_map_pmap(map) != kernel_pmap && 3480 vm_map_entry_system_wired_count(entry) != 0)) { 3481 unsigned int last_timestamp; 3482 vm_offset_t saved_start; 3483 vm_map_entry_t tmp_entry; 3484 3485 saved_start = entry->start; 3486 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3487 last_timestamp = map->timestamp; 3488 (void) vm_map_unlock_and_wait(map, 0); 3489 vm_map_lock(map); 3490 if (last_timestamp + 1 != map->timestamp) { 3491 /* 3492 * Look again for the entry because the map was 3493 * modified while it was unlocked. 3494 * Specifically, the entry may have been 3495 * clipped, merged, or deleted. 3496 */ 3497 if (!vm_map_lookup_entry(map, saved_start, 3498 &tmp_entry)) 3499 entry = tmp_entry->next; 3500 else { 3501 entry = tmp_entry; 3502 vm_map_clip_start(map, entry, 3503 saved_start); 3504 } 3505 } 3506 continue; 3507 } 3508 vm_map_clip_end(map, entry, end); 3509 3510 next = entry->next; 3511 3512 /* 3513 * Unwire before removing addresses from the pmap; otherwise, 3514 * unwiring will put the entries back in the pmap. 3515 */ 3516 if (entry->wired_count != 0) 3517 vm_map_entry_unwire(map, entry); 3518 3519 /* 3520 * Remove mappings for the pages, but only if the 3521 * mappings could exist. For instance, it does not 3522 * make sense to call pmap_remove() for guard entries. 3523 */ 3524 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 3525 entry->object.vm_object != NULL) 3526 pmap_remove(map->pmap, entry->start, entry->end); 3527 3528 if (entry->end == map->anon_loc) 3529 map->anon_loc = entry->start; 3530 3531 /* 3532 * Delete the entry only after removing all pmap 3533 * entries pointing to its pages. (Otherwise, its 3534 * page frames may be reallocated, and any modify bits 3535 * will be set in the wrong object!) 3536 */ 3537 vm_map_entry_delete(map, entry); 3538 entry = next; 3539 } 3540 return (KERN_SUCCESS); 3541 } 3542 3543 /* 3544 * vm_map_remove: 3545 * 3546 * Remove the given address range from the target map. 3547 * This is the exported form of vm_map_delete. 3548 */ 3549 int 3550 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3551 { 3552 int result; 3553 3554 vm_map_lock(map); 3555 VM_MAP_RANGE_CHECK(map, start, end); 3556 result = vm_map_delete(map, start, end); 3557 vm_map_unlock(map); 3558 return (result); 3559 } 3560 3561 /* 3562 * vm_map_check_protection: 3563 * 3564 * Assert that the target map allows the specified privilege on the 3565 * entire address region given. The entire region must be allocated. 3566 * 3567 * WARNING! This code does not and should not check whether the 3568 * contents of the region is accessible. For example a smaller file 3569 * might be mapped into a larger address space. 3570 * 3571 * NOTE! This code is also called by munmap(). 3572 * 3573 * The map must be locked. A read lock is sufficient. 3574 */ 3575 boolean_t 3576 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3577 vm_prot_t protection) 3578 { 3579 vm_map_entry_t entry; 3580 vm_map_entry_t tmp_entry; 3581 3582 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3583 return (FALSE); 3584 entry = tmp_entry; 3585 3586 while (start < end) { 3587 /* 3588 * No holes allowed! 3589 */ 3590 if (start < entry->start) 3591 return (FALSE); 3592 /* 3593 * Check protection associated with entry. 
3594 */ 3595 if ((entry->protection & protection) != protection) 3596 return (FALSE); 3597 /* go to next entry */ 3598 start = entry->end; 3599 entry = entry->next; 3600 } 3601 return (TRUE); 3602 } 3603 3604 /* 3605 * vm_map_copy_entry: 3606 * 3607 * Copies the contents of the source entry to the destination 3608 * entry. The entries *must* be aligned properly. 3609 */ 3610 static void 3611 vm_map_copy_entry( 3612 vm_map_t src_map, 3613 vm_map_t dst_map, 3614 vm_map_entry_t src_entry, 3615 vm_map_entry_t dst_entry, 3616 vm_ooffset_t *fork_charge) 3617 { 3618 vm_object_t src_object; 3619 vm_map_entry_t fake_entry; 3620 vm_offset_t size; 3621 struct ucred *cred; 3622 int charged; 3623 3624 VM_MAP_ASSERT_LOCKED(dst_map); 3625 3626 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3627 return; 3628 3629 if (src_entry->wired_count == 0 || 3630 (src_entry->protection & VM_PROT_WRITE) == 0) { 3631 /* 3632 * If the source entry is marked needs_copy, it is already 3633 * write-protected. 3634 */ 3635 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3636 (src_entry->protection & VM_PROT_WRITE) != 0) { 3637 pmap_protect(src_map->pmap, 3638 src_entry->start, 3639 src_entry->end, 3640 src_entry->protection & ~VM_PROT_WRITE); 3641 } 3642 3643 /* 3644 * Make a copy of the object. 3645 */ 3646 size = src_entry->end - src_entry->start; 3647 if ((src_object = src_entry->object.vm_object) != NULL) { 3648 VM_OBJECT_WLOCK(src_object); 3649 charged = ENTRY_CHARGED(src_entry); 3650 if (src_object->handle == NULL && 3651 (src_object->type == OBJT_DEFAULT || 3652 src_object->type == OBJT_SWAP)) { 3653 vm_object_collapse(src_object); 3654 if ((src_object->flags & (OBJ_NOSPLIT | 3655 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3656 vm_object_split(src_entry); 3657 src_object = 3658 src_entry->object.vm_object; 3659 } 3660 } 3661 vm_object_reference_locked(src_object); 3662 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3663 if (src_entry->cred != NULL && 3664 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3665 KASSERT(src_object->cred == NULL, 3666 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3667 src_object)); 3668 src_object->cred = src_entry->cred; 3669 src_object->charge = size; 3670 } 3671 VM_OBJECT_WUNLOCK(src_object); 3672 dst_entry->object.vm_object = src_object; 3673 if (charged) { 3674 cred = curthread->td_ucred; 3675 crhold(cred); 3676 dst_entry->cred = cred; 3677 *fork_charge += size; 3678 if (!(src_entry->eflags & 3679 MAP_ENTRY_NEEDS_COPY)) { 3680 crhold(cred); 3681 src_entry->cred = cred; 3682 *fork_charge += size; 3683 } 3684 } 3685 src_entry->eflags |= MAP_ENTRY_COW | 3686 MAP_ENTRY_NEEDS_COPY; 3687 dst_entry->eflags |= MAP_ENTRY_COW | 3688 MAP_ENTRY_NEEDS_COPY; 3689 dst_entry->offset = src_entry->offset; 3690 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3691 /* 3692 * MAP_ENTRY_VN_WRITECNT cannot 3693 * indicate write reference from 3694 * src_entry, since the entry is 3695 * marked as needs copy. Allocate a 3696 * fake entry that is used to 3697 * decrement object->un_pager.vnp.writecount 3698 * at the appropriate time. Attach 3699 * fake_entry to the deferred list. 
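 * The deferred list is drained by vm_map_process_deferred() after
 * the map locks are dropped; that is when the writecount is
 * actually decremented and the vnode can be locked safely.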
3700 */ 3701 fake_entry = vm_map_entry_create(dst_map); 3702 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3703 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3704 vm_object_reference(src_object); 3705 fake_entry->object.vm_object = src_object; 3706 fake_entry->start = src_entry->start; 3707 fake_entry->end = src_entry->end; 3708 fake_entry->next = curthread->td_map_def_user; 3709 curthread->td_map_def_user = fake_entry; 3710 } 3711 3712 pmap_copy(dst_map->pmap, src_map->pmap, 3713 dst_entry->start, dst_entry->end - dst_entry->start, 3714 src_entry->start); 3715 } else { 3716 dst_entry->object.vm_object = NULL; 3717 dst_entry->offset = 0; 3718 if (src_entry->cred != NULL) { 3719 dst_entry->cred = curthread->td_ucred; 3720 crhold(dst_entry->cred); 3721 *fork_charge += size; 3722 } 3723 } 3724 } else { 3725 /* 3726 * We don't want to make writeable wired pages copy-on-write. 3727 * Immediately copy these pages into the new map by simulating 3728 * page faults. The new pages are pageable. 3729 */ 3730 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3731 fork_charge); 3732 } 3733 } 3734 3735 /* 3736 * vmspace_map_entry_forked: 3737 * Update the newly-forked vmspace each time a map entry is inherited 3738 * or copied. The values for vm_dsize and vm_tsize are approximate 3739 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3740 */ 3741 static void 3742 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3743 vm_map_entry_t entry) 3744 { 3745 vm_size_t entrysize; 3746 vm_offset_t newend; 3747 3748 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3749 return; 3750 entrysize = entry->end - entry->start; 3751 vm2->vm_map.size += entrysize; 3752 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3753 vm2->vm_ssize += btoc(entrysize); 3754 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3755 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3756 newend = MIN(entry->end, 3757 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3758 vm2->vm_dsize += btoc(newend - entry->start); 3759 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3760 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3761 newend = MIN(entry->end, 3762 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3763 vm2->vm_tsize += btoc(newend - entry->start); 3764 } 3765 } 3766 3767 /* 3768 * vmspace_fork: 3769 * Create a new process vmspace structure and vm_map 3770 * based on those of an existing process. The new map 3771 * is based on the old map, according to the inheritance 3772 * values on the regions in that map. 3773 * 3774 * XXX It might be worth coalescing the entries added to the new vmspace. 3775 * 3776 * The source map must not be locked. 3777 */ 3778 struct vmspace * 3779 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3780 { 3781 struct vmspace *vm2; 3782 vm_map_t new_map, old_map; 3783 vm_map_entry_t new_entry, old_entry; 3784 vm_object_t object; 3785 int error, locked; 3786 vm_inherit_t inh; 3787 3788 old_map = &vm1->vm_map; 3789 /* Copy immutable fields of vm1 to vm2. 
*/ 3790 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 3791 pmap_pinit); 3792 if (vm2 == NULL) 3793 return (NULL); 3794 3795 vm2->vm_taddr = vm1->vm_taddr; 3796 vm2->vm_daddr = vm1->vm_daddr; 3797 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3798 vm_map_lock(old_map); 3799 if (old_map->busy) 3800 vm_map_wait_busy(old_map); 3801 new_map = &vm2->vm_map; 3802 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3803 KASSERT(locked, ("vmspace_fork: lock failed")); 3804 3805 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); 3806 if (error != 0) { 3807 sx_xunlock(&old_map->lock); 3808 sx_xunlock(&new_map->lock); 3809 vm_map_process_deferred(); 3810 vmspace_free(vm2); 3811 return (NULL); 3812 } 3813 3814 new_map->anon_loc = old_map->anon_loc; 3815 3816 old_entry = old_map->header.next; 3817 3818 while (old_entry != &old_map->header) { 3819 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3820 panic("vm_map_fork: encountered a submap"); 3821 3822 inh = old_entry->inheritance; 3823 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3824 inh != VM_INHERIT_NONE) 3825 inh = VM_INHERIT_COPY; 3826 3827 switch (inh) { 3828 case VM_INHERIT_NONE: 3829 break; 3830 3831 case VM_INHERIT_SHARE: 3832 /* 3833 * Clone the entry, creating the shared object if necessary. 3834 */ 3835 object = old_entry->object.vm_object; 3836 if (object == NULL) { 3837 object = vm_object_allocate(OBJT_DEFAULT, 3838 atop(old_entry->end - old_entry->start)); 3839 old_entry->object.vm_object = object; 3840 old_entry->offset = 0; 3841 if (old_entry->cred != NULL) { 3842 object->cred = old_entry->cred; 3843 object->charge = old_entry->end - 3844 old_entry->start; 3845 old_entry->cred = NULL; 3846 } 3847 } 3848 3849 /* 3850 * Add the reference before calling vm_object_shadow 3851 * to insure that a shadow object is created. 3852 */ 3853 vm_object_reference(object); 3854 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3855 vm_object_shadow(&old_entry->object.vm_object, 3856 &old_entry->offset, 3857 old_entry->end - old_entry->start); 3858 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3859 /* Transfer the second reference too. */ 3860 vm_object_reference( 3861 old_entry->object.vm_object); 3862 3863 /* 3864 * As in vm_map_simplify_entry(), the 3865 * vnode lock will not be acquired in 3866 * this call to vm_object_deallocate(). 3867 */ 3868 vm_object_deallocate(object); 3869 object = old_entry->object.vm_object; 3870 } 3871 VM_OBJECT_WLOCK(object); 3872 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3873 if (old_entry->cred != NULL) { 3874 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3875 object->cred = old_entry->cred; 3876 object->charge = old_entry->end - old_entry->start; 3877 old_entry->cred = NULL; 3878 } 3879 3880 /* 3881 * Assert the correct state of the vnode 3882 * v_writecount while the object is locked, to 3883 * not relock it later for the assertion 3884 * correctness. 3885 */ 3886 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3887 object->type == OBJT_VNODE) { 3888 KASSERT(((struct vnode *)object->handle)-> 3889 v_writecount > 0, 3890 ("vmspace_fork: v_writecount %p", object)); 3891 KASSERT(object->un_pager.vnp.writemappings > 0, 3892 ("vmspace_fork: vnp.writecount %p", 3893 object)); 3894 } 3895 VM_OBJECT_WUNLOCK(object); 3896 3897 /* 3898 * Clone the entry, referencing the shared object. 
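 * The clone below starts out unwired and outside any transition;
 * wiring state is never inherited across fork.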
3899 */ 3900 new_entry = vm_map_entry_create(new_map); 3901 *new_entry = *old_entry; 3902 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3903 MAP_ENTRY_IN_TRANSITION); 3904 new_entry->wiring_thread = NULL; 3905 new_entry->wired_count = 0; 3906 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3907 vnode_pager_update_writecount(object, 3908 new_entry->start, new_entry->end); 3909 } 3910 vm_map_entry_set_vnode_text(new_entry, true); 3911 3912 /* 3913 * Insert the entry into the new map -- we know we're 3914 * inserting at the end of the new map. 3915 */ 3916 vm_map_entry_link(new_map, new_entry); 3917 vmspace_map_entry_forked(vm1, vm2, new_entry); 3918 3919 /* 3920 * Update the physical map 3921 */ 3922 pmap_copy(new_map->pmap, old_map->pmap, 3923 new_entry->start, 3924 (old_entry->end - old_entry->start), 3925 old_entry->start); 3926 break; 3927 3928 case VM_INHERIT_COPY: 3929 /* 3930 * Clone the entry and link into the map. 3931 */ 3932 new_entry = vm_map_entry_create(new_map); 3933 *new_entry = *old_entry; 3934 /* 3935 * Copied entry is COW over the old object. 3936 */ 3937 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3938 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3939 new_entry->wiring_thread = NULL; 3940 new_entry->wired_count = 0; 3941 new_entry->object.vm_object = NULL; 3942 new_entry->cred = NULL; 3943 vm_map_entry_link(new_map, new_entry); 3944 vmspace_map_entry_forked(vm1, vm2, new_entry); 3945 vm_map_copy_entry(old_map, new_map, old_entry, 3946 new_entry, fork_charge); 3947 vm_map_entry_set_vnode_text(new_entry, true); 3948 break; 3949 3950 case VM_INHERIT_ZERO: 3951 /* 3952 * Create a new anonymous mapping entry modelled from 3953 * the old one. 3954 */ 3955 new_entry = vm_map_entry_create(new_map); 3956 memset(new_entry, 0, sizeof(*new_entry)); 3957 3958 new_entry->start = old_entry->start; 3959 new_entry->end = old_entry->end; 3960 new_entry->eflags = old_entry->eflags & 3961 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3962 MAP_ENTRY_VN_WRITECNT | MAP_ENTRY_VN_EXEC); 3963 new_entry->protection = old_entry->protection; 3964 new_entry->max_protection = old_entry->max_protection; 3965 new_entry->inheritance = VM_INHERIT_ZERO; 3966 3967 vm_map_entry_link(new_map, new_entry); 3968 vmspace_map_entry_forked(vm1, vm2, new_entry); 3969 3970 new_entry->cred = curthread->td_ucred; 3971 crhold(new_entry->cred); 3972 *fork_charge += (new_entry->end - new_entry->start); 3973 3974 break; 3975 } 3976 old_entry = old_entry->next; 3977 } 3978 /* 3979 * Use inlined vm_map_unlock() to postpone handling the deferred 3980 * map entries, which cannot be done until both old_map and 3981 * new_map locks are released. 3982 */ 3983 sx_xunlock(&old_map->lock); 3984 sx_xunlock(&new_map->lock); 3985 vm_map_process_deferred(); 3986 3987 return (vm2); 3988 } 3989 3990 /* 3991 * Create a process's stack for exec_new_vmspace(). This function is never 3992 * asked to wire the newly created stack. 3993 */ 3994 int 3995 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3996 vm_prot_t prot, vm_prot_t max, int cow) 3997 { 3998 vm_size_t growsize, init_ssize; 3999 rlim_t vmemlim; 4000 int rv; 4001 4002 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4003 growsize = sgrowsiz; 4004 init_ssize = (max_ssize < growsize) ? 
	vm_map_lock(map);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
	    max, cow);
out:
	vm_map_unlock(map);
	return (rv);
}

static int stack_guard_page = 1;
SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
    &stack_guard_page, 0,
    "Specifies the number of guard pages for a stack that grows up or down");

static int
vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	vm_offset_t bot, gap_bot, gap_top, top;
	vm_size_t init_ssize, sgp;
	int orient, rv;

	/*
	 * The stack orientation is piggybacked with the cow argument.
	 * Extract it into orient and mask the cow argument so that we
	 * don't pass it around further.
	 */
	orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
	KASSERT(orient != 0, ("No stack grow direction"));
	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
	    ("bi-dir stack"));

	if (addrbos < vm_map_min(map) ||
	    addrbos + max_ssize > vm_map_max(map) ||
	    addrbos + max_ssize <= addrbos)
		return (KERN_INVALID_ADDRESS);
	sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
	if (sgp >= max_ssize)
		return (KERN_INVALID_ARGUMENT);

	init_ssize = growsize;
	if (max_ssize < init_ssize + sgp)
		init_ssize = max_ssize - sgp;

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * If we can't accommodate max_ssize in the current mapping, no go.
	 */
	if (prev_entry->next->start < addrbos + max_ssize)
		return (KERN_NO_SPACE);

	/*
	 * We initially map a stack of only init_ssize.  We will grow as
	 * needed later.  Depending on the orientation of the stack (i.e.
	 * the grow direction) we either map at the top of the range, the
	 * bottom of the range or in the middle.
	 *
	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
	 * and cow to be 0.  Possibly we should eliminate these as input
	 * parameters, and just pass these values here in the insert call.
	 */
	if (orient == MAP_STACK_GROWS_DOWN) {
		bot = addrbos + max_ssize - init_ssize;
		top = bot + init_ssize;
		gap_bot = addrbos;
		gap_top = bot;
	} else /* if (orient == MAP_STACK_GROWS_UP) */ {
		bot = addrbos;
		top = bot + init_ssize;
		gap_bot = top;
		gap_top = addrbos + max_ssize;
	}
	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
	if (rv != KERN_SUCCESS)
		return (rv);
	new_entry = prev_entry->next;
	KASSERT(new_entry->end == top || new_entry->start == bot,
	    ("Bad entry start/end for new stack entry"));
	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
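	/*
	 * If inserting the guard gap failed, back out the stack entry
	 * created above.
	 */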
	if (rv != KERN_SUCCESS)
		(void)vm_map_delete(map, bot, top);
	return (rv);
}

/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
 * successfully grow the stack.
 */
static int
vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
{
	vm_map_entry_t stack_entry;
	struct proc *p;
	struct vmspace *vm;
	struct ucred *cred;
	vm_offset_t gap_end, gap_start, grow_start;
	size_t grow_amount, guard, max_grow;
	rlim_t lmemlim, stacklim, vmemlim;
	int rv, rv1;
	bool gap_deleted, grow_down, is_procstack;
#ifdef notyet
	uint64_t limit;
#endif
#ifdef RACCT
	int error;
#endif

	p = curproc;
	vm = p->p_vmspace;

	/*
	 * Disallow stack growth when the access is performed by a
	 * debugger or AIO daemon.  The reason is that the wrong
	 * resource limits are applied.
	 */
	if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
		return (KERN_FAILURE);

	MPASS(!map->system_map);

	guard = stack_guard_page * PAGE_SIZE;
	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
	stacklim = lim_cur(curthread, RLIMIT_STACK);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
retry:
	/* If addr is not in a hole for a stack grow area, no need to grow. */
	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
		return (KERN_FAILURE);
	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
		return (KERN_SUCCESS);
	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
		stack_entry = gap_entry->next;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
		    stack_entry->start != gap_entry->end)
			return (KERN_FAILURE);
		grow_amount = round_page(stack_entry->start - addr);
		grow_down = true;
	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
		stack_entry = gap_entry->prev;
		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
		    stack_entry->end != gap_entry->start)
			return (KERN_FAILURE);
		grow_amount = round_page(addr + 1 - stack_entry->end);
		grow_down = false;
	} else {
		return (KERN_FAILURE);
	}
	max_grow = gap_entry->end - gap_entry->start;
	if (guard > max_grow)
		return (KERN_NO_SPACE);
	max_grow -= guard;
	if (grow_amount > max_grow)
		return (KERN_NO_SPACE);

	/*
	 * If this is the main process stack, see if we're over the stack
	 * limit.
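	 * The main stack lies between vm_maxsaddr and the ABI's
	 * sv_usrstack; only growth of that region is charged against
	 * RLIMIT_STACK (and, below, the RACCT_STACK resource), while
	 * other MAP_STACK mappings are bounded by their gap size and
	 * the remaining limits.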
4179 */ 4180 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 4181 addr < (vm_offset_t)p->p_sysent->sv_usrstack; 4182 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 4183 return (KERN_NO_SPACE); 4184 4185 #ifdef RACCT 4186 if (racct_enable) { 4187 PROC_LOCK(p); 4188 if (is_procstack && racct_set(p, RACCT_STACK, 4189 ctob(vm->vm_ssize) + grow_amount)) { 4190 PROC_UNLOCK(p); 4191 return (KERN_NO_SPACE); 4192 } 4193 PROC_UNLOCK(p); 4194 } 4195 #endif 4196 4197 grow_amount = roundup(grow_amount, sgrowsiz); 4198 if (grow_amount > max_grow) 4199 grow_amount = max_grow; 4200 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 4201 grow_amount = trunc_page((vm_size_t)stacklim) - 4202 ctob(vm->vm_ssize); 4203 } 4204 4205 #ifdef notyet 4206 PROC_LOCK(p); 4207 limit = racct_get_available(p, RACCT_STACK); 4208 PROC_UNLOCK(p); 4209 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 4210 grow_amount = limit - ctob(vm->vm_ssize); 4211 #endif 4212 4213 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 4214 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 4215 rv = KERN_NO_SPACE; 4216 goto out; 4217 } 4218 #ifdef RACCT 4219 if (racct_enable) { 4220 PROC_LOCK(p); 4221 if (racct_set(p, RACCT_MEMLOCK, 4222 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 4223 PROC_UNLOCK(p); 4224 rv = KERN_NO_SPACE; 4225 goto out; 4226 } 4227 PROC_UNLOCK(p); 4228 } 4229 #endif 4230 } 4231 4232 /* If we would blow our VMEM resource limit, no go */ 4233 if (map->size + grow_amount > vmemlim) { 4234 rv = KERN_NO_SPACE; 4235 goto out; 4236 } 4237 #ifdef RACCT 4238 if (racct_enable) { 4239 PROC_LOCK(p); 4240 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 4241 PROC_UNLOCK(p); 4242 rv = KERN_NO_SPACE; 4243 goto out; 4244 } 4245 PROC_UNLOCK(p); 4246 } 4247 #endif 4248 4249 if (vm_map_lock_upgrade(map)) { 4250 gap_entry = NULL; 4251 vm_map_lock_read(map); 4252 goto retry; 4253 } 4254 4255 if (grow_down) { 4256 grow_start = gap_entry->end - grow_amount; 4257 if (gap_entry->start + grow_amount == gap_entry->end) { 4258 gap_start = gap_entry->start; 4259 gap_end = gap_entry->end; 4260 vm_map_entry_delete(map, gap_entry); 4261 gap_deleted = true; 4262 } else { 4263 MPASS(gap_entry->start < gap_entry->end - grow_amount); 4264 gap_entry->end -= grow_amount; 4265 vm_map_entry_resize_free(map, gap_entry); 4266 gap_deleted = false; 4267 } 4268 rv = vm_map_insert(map, NULL, 0, grow_start, 4269 grow_start + grow_amount, 4270 stack_entry->protection, stack_entry->max_protection, 4271 MAP_STACK_GROWS_DOWN); 4272 if (rv != KERN_SUCCESS) { 4273 if (gap_deleted) { 4274 rv1 = vm_map_insert(map, NULL, 0, gap_start, 4275 gap_end, VM_PROT_NONE, VM_PROT_NONE, 4276 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN); 4277 MPASS(rv1 == KERN_SUCCESS); 4278 } else { 4279 gap_entry->end += grow_amount; 4280 vm_map_entry_resize_free(map, gap_entry); 4281 } 4282 } 4283 } else { 4284 grow_start = stack_entry->end; 4285 cred = stack_entry->cred; 4286 if (cred == NULL && stack_entry->object.vm_object != NULL) 4287 cred = stack_entry->object.vm_object->cred; 4288 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 4289 rv = KERN_NO_SPACE; 4290 /* Grow the underlying object if applicable. 
		else if (stack_entry->object.vm_object == NULL ||
		    vm_object_coalesce(stack_entry->object.vm_object,
		    stack_entry->offset,
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    (vm_size_t)grow_amount, cred != NULL)) {
			if (gap_entry->start + grow_amount == gap_entry->end)
				vm_map_entry_delete(map, gap_entry);
			else
				gap_entry->start += grow_amount;
			stack_entry->end += grow_amount;
			map->size += grow_amount;
			vm_map_entry_resize_free(map, stack_entry);
			rv = KERN_SUCCESS;
		} else
			rv = KERN_FAILURE;
	}
	if (rv == KERN_SUCCESS && is_procstack)
		vm->vm_ssize += btoc(grow_amount);

	/*
	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
	 */
	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
		vm_map_unlock(map);
		vm_map_wire(map, grow_start, grow_start + grow_amount,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		vm_map_lock_read(map);
	} else
		vm_map_lock_downgrade(map);

out:
#ifdef RACCT
	if (racct_enable && rv != KERN_SUCCESS) {
		PROC_LOCK(p);
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		if (!old_mlock) {
			error = racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)));
			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
		PROC_UNLOCK(p);
	}
#endif

	return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is empty: it
 * contains no mappings.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
	    ("vmspace_exec recursed"));
	newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
	if (newvmspace == NULL)
		return (ENOMEM);
	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written this way for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let the
	 * other processes that are still using the vmspace finally run
	 * it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future
	 * modifications.
	 */
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	curthread->td_pflags |= TDP_EXECVMSPC;
	return (0);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
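 *
 * A minimal sketch of the caller's side (illustrative only; the real
 * rfork(2) path performs additional bookkeeping around this call):
 *
 *	if ((flags & (RFMEM | RFPROC)) == 0) {
 *		error = vmspace_unshare(p);
 *		if (error != 0)
 *			return (error);
 *	}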
4376 */ 4377 int 4378 vmspace_unshare(struct proc *p) 4379 { 4380 struct vmspace *oldvmspace = p->p_vmspace; 4381 struct vmspace *newvmspace; 4382 vm_ooffset_t fork_charge; 4383 4384 if (oldvmspace->vm_refcnt == 1) 4385 return (0); 4386 fork_charge = 0; 4387 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4388 if (newvmspace == NULL) 4389 return (ENOMEM); 4390 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4391 vmspace_free(newvmspace); 4392 return (ENOMEM); 4393 } 4394 PROC_VMSPACE_LOCK(p); 4395 p->p_vmspace = newvmspace; 4396 PROC_VMSPACE_UNLOCK(p); 4397 if (p == curthread->td_proc) 4398 pmap_activate(curthread); 4399 vmspace_free(oldvmspace); 4400 return (0); 4401 } 4402 4403 /* 4404 * vm_map_lookup: 4405 * 4406 * Finds the VM object, offset, and 4407 * protection for a given virtual address in the 4408 * specified map, assuming a page fault of the 4409 * type specified. 4410 * 4411 * Leaves the map in question locked for read; return 4412 * values are guaranteed until a vm_map_lookup_done 4413 * call is performed. Note that the map argument 4414 * is in/out; the returned map must be used in 4415 * the call to vm_map_lookup_done. 4416 * 4417 * A handle (out_entry) is returned for use in 4418 * vm_map_lookup_done, to make that fast. 4419 * 4420 * If a lookup is requested with "write protection" 4421 * specified, the map may be changed to perform virtual 4422 * copying operations, although the data referenced will 4423 * remain the same. 4424 */ 4425 int 4426 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4427 vm_offset_t vaddr, 4428 vm_prot_t fault_typea, 4429 vm_map_entry_t *out_entry, /* OUT */ 4430 vm_object_t *object, /* OUT */ 4431 vm_pindex_t *pindex, /* OUT */ 4432 vm_prot_t *out_prot, /* OUT */ 4433 boolean_t *wired) /* OUT */ 4434 { 4435 vm_map_entry_t entry; 4436 vm_map_t map = *var_map; 4437 vm_prot_t prot; 4438 vm_prot_t fault_type = fault_typea; 4439 vm_object_t eobject; 4440 vm_size_t size; 4441 struct ucred *cred; 4442 4443 RetryLookup: 4444 4445 vm_map_lock_read(map); 4446 4447 RetryLookupLocked: 4448 /* 4449 * Lookup the faulting address. 4450 */ 4451 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 4452 vm_map_unlock_read(map); 4453 return (KERN_INVALID_ADDRESS); 4454 } 4455 4456 entry = *out_entry; 4457 4458 /* 4459 * Handle submaps. 4460 */ 4461 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4462 vm_map_t old_map = map; 4463 4464 *var_map = map = entry->object.sub_map; 4465 vm_map_unlock_read(old_map); 4466 goto RetryLookup; 4467 } 4468 4469 /* 4470 * Check whether this task is allowed to have this page. 
4471 */ 4472 prot = entry->protection; 4473 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 4474 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 4475 if (prot == VM_PROT_NONE && map != kernel_map && 4476 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 4477 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4478 MAP_ENTRY_STACK_GAP_UP)) != 0 && 4479 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 4480 goto RetryLookupLocked; 4481 } 4482 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4483 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 4484 vm_map_unlock_read(map); 4485 return (KERN_PROTECTION_FAILURE); 4486 } 4487 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 4488 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 4489 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 4490 ("entry %p flags %x", entry, entry->eflags)); 4491 if ((fault_typea & VM_PROT_COPY) != 0 && 4492 (entry->max_protection & VM_PROT_WRITE) == 0 && 4493 (entry->eflags & MAP_ENTRY_COW) == 0) { 4494 vm_map_unlock_read(map); 4495 return (KERN_PROTECTION_FAILURE); 4496 } 4497 4498 /* 4499 * If this page is not pageable, we have to get it for all possible 4500 * accesses. 4501 */ 4502 *wired = (entry->wired_count != 0); 4503 if (*wired) 4504 fault_type = entry->protection; 4505 size = entry->end - entry->start; 4506 /* 4507 * If the entry was copy-on-write, we either ... 4508 */ 4509 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4510 /* 4511 * If we want to write the page, we may as well handle that 4512 * now since we've got the map locked. 4513 * 4514 * If we don't need to write the page, we just demote the 4515 * permissions allowed. 4516 */ 4517 if ((fault_type & VM_PROT_WRITE) != 0 || 4518 (fault_typea & VM_PROT_COPY) != 0) { 4519 /* 4520 * Make a new object, and place it in the object 4521 * chain. Note that no new references have appeared 4522 * -- one just moved from the map to the new 4523 * object. 4524 */ 4525 if (vm_map_lock_upgrade(map)) 4526 goto RetryLookup; 4527 4528 if (entry->cred == NULL) { 4529 /* 4530 * The debugger owner is charged for 4531 * the memory. 4532 */ 4533 cred = curthread->td_ucred; 4534 crhold(cred); 4535 if (!swap_reserve_by_cred(size, cred)) { 4536 crfree(cred); 4537 vm_map_unlock(map); 4538 return (KERN_RESOURCE_SHORTAGE); 4539 } 4540 entry->cred = cred; 4541 } 4542 vm_object_shadow(&entry->object.vm_object, 4543 &entry->offset, size); 4544 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4545 eobject = entry->object.vm_object; 4546 if (eobject->cred != NULL) { 4547 /* 4548 * The object was not shadowed. 4549 */ 4550 swap_release_by_cred(size, entry->cred); 4551 crfree(entry->cred); 4552 entry->cred = NULL; 4553 } else if (entry->cred != NULL) { 4554 VM_OBJECT_WLOCK(eobject); 4555 eobject->cred = entry->cred; 4556 eobject->charge = size; 4557 VM_OBJECT_WUNLOCK(eobject); 4558 entry->cred = NULL; 4559 } 4560 4561 vm_map_lock_downgrade(map); 4562 } else { 4563 /* 4564 * We're attempting to read a copy-on-write page -- 4565 * don't allow writes. 4566 */ 4567 prot &= ~VM_PROT_WRITE; 4568 } 4569 } 4570 4571 /* 4572 * Create an object if necessary. 
4573 */ 4574 if (entry->object.vm_object == NULL && 4575 !map->system_map) { 4576 if (vm_map_lock_upgrade(map)) 4577 goto RetryLookup; 4578 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 4579 atop(size)); 4580 entry->offset = 0; 4581 if (entry->cred != NULL) { 4582 VM_OBJECT_WLOCK(entry->object.vm_object); 4583 entry->object.vm_object->cred = entry->cred; 4584 entry->object.vm_object->charge = size; 4585 VM_OBJECT_WUNLOCK(entry->object.vm_object); 4586 entry->cred = NULL; 4587 } 4588 vm_map_lock_downgrade(map); 4589 } 4590 4591 /* 4592 * Return the object/offset from this entry. If the entry was 4593 * copy-on-write or empty, it has been fixed up. 4594 */ 4595 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4596 *object = entry->object.vm_object; 4597 4598 *out_prot = prot; 4599 return (KERN_SUCCESS); 4600 } 4601 4602 /* 4603 * vm_map_lookup_locked: 4604 * 4605 * Lookup the faulting address. A version of vm_map_lookup that returns 4606 * KERN_FAILURE instead of blocking on map lock or memory allocation. 4607 */ 4608 int 4609 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 4610 vm_offset_t vaddr, 4611 vm_prot_t fault_typea, 4612 vm_map_entry_t *out_entry, /* OUT */ 4613 vm_object_t *object, /* OUT */ 4614 vm_pindex_t *pindex, /* OUT */ 4615 vm_prot_t *out_prot, /* OUT */ 4616 boolean_t *wired) /* OUT */ 4617 { 4618 vm_map_entry_t entry; 4619 vm_map_t map = *var_map; 4620 vm_prot_t prot; 4621 vm_prot_t fault_type = fault_typea; 4622 4623 /* 4624 * Lookup the faulting address. 4625 */ 4626 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 4627 return (KERN_INVALID_ADDRESS); 4628 4629 entry = *out_entry; 4630 4631 /* 4632 * Fail if the entry refers to a submap. 4633 */ 4634 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 4635 return (KERN_FAILURE); 4636 4637 /* 4638 * Check whether this task is allowed to have this page. 4639 */ 4640 prot = entry->protection; 4641 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4642 if ((fault_type & prot) != fault_type) 4643 return (KERN_PROTECTION_FAILURE); 4644 4645 /* 4646 * If this page is not pageable, we have to get it for all possible 4647 * accesses. 4648 */ 4649 *wired = (entry->wired_count != 0); 4650 if (*wired) 4651 fault_type = entry->protection; 4652 4653 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4654 /* 4655 * Fail if the entry was copy-on-write for a write fault. 4656 */ 4657 if (fault_type & VM_PROT_WRITE) 4658 return (KERN_FAILURE); 4659 /* 4660 * We're attempting to read a copy-on-write page -- 4661 * don't allow writes. 4662 */ 4663 prot &= ~VM_PROT_WRITE; 4664 } 4665 4666 /* 4667 * Fail if an object should be created. 4668 */ 4669 if (entry->object.vm_object == NULL && !map->system_map) 4670 return (KERN_FAILURE); 4671 4672 /* 4673 * Return the object/offset from this entry. If the entry was 4674 * copy-on-write or empty, it has been fixed up. 4675 */ 4676 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4677 *object = entry->object.vm_object; 4678 4679 *out_prot = prot; 4680 return (KERN_SUCCESS); 4681 } 4682 4683 /* 4684 * vm_map_lookup_done: 4685 * 4686 * Releases locks acquired by a vm_map_lookup 4687 * (according to the handle returned by that lookup). 
4688 */ 4689 void 4690 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4691 { 4692 /* 4693 * Unlock the main-level map 4694 */ 4695 vm_map_unlock_read(map); 4696 } 4697 4698 vm_offset_t 4699 vm_map_max_KBI(const struct vm_map *map) 4700 { 4701 4702 return (vm_map_max(map)); 4703 } 4704 4705 vm_offset_t 4706 vm_map_min_KBI(const struct vm_map *map) 4707 { 4708 4709 return (vm_map_min(map)); 4710 } 4711 4712 pmap_t 4713 vm_map_pmap_KBI(vm_map_t map) 4714 { 4715 4716 return (map->pmap); 4717 } 4718 4719 #include "opt_ddb.h" 4720 #ifdef DDB 4721 #include <sys/kernel.h> 4722 4723 #include <ddb/ddb.h> 4724 4725 static void 4726 vm_map_print(vm_map_t map) 4727 { 4728 vm_map_entry_t entry; 4729 4730 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4731 (void *)map, 4732 (void *)map->pmap, map->nentries, map->timestamp); 4733 4734 db_indent += 2; 4735 for (entry = map->header.next; entry != &map->header; 4736 entry = entry->next) { 4737 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 4738 (void *)entry, (void *)entry->start, (void *)entry->end, 4739 entry->eflags); 4740 { 4741 static char *inheritance_name[4] = 4742 {"share", "copy", "none", "donate_copy"}; 4743 4744 db_iprintf(" prot=%x/%x/%s", 4745 entry->protection, 4746 entry->max_protection, 4747 inheritance_name[(int)(unsigned char)entry->inheritance]); 4748 if (entry->wired_count != 0) 4749 db_printf(", wired"); 4750 } 4751 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4752 db_printf(", share=%p, offset=0x%jx\n", 4753 (void *)entry->object.sub_map, 4754 (uintmax_t)entry->offset); 4755 if ((entry->prev == &map->header) || 4756 (entry->prev->object.sub_map != 4757 entry->object.sub_map)) { 4758 db_indent += 2; 4759 vm_map_print((vm_map_t)entry->object.sub_map); 4760 db_indent -= 2; 4761 } 4762 } else { 4763 if (entry->cred != NULL) 4764 db_printf(", ruid %d", entry->cred->cr_ruid); 4765 db_printf(", object=%p, offset=0x%jx", 4766 (void *)entry->object.vm_object, 4767 (uintmax_t)entry->offset); 4768 if (entry->object.vm_object && entry->object.vm_object->cred) 4769 db_printf(", obj ruid %d charge %jx", 4770 entry->object.vm_object->cred->cr_ruid, 4771 (uintmax_t)entry->object.vm_object->charge); 4772 if (entry->eflags & MAP_ENTRY_COW) 4773 db_printf(", copy (%s)", 4774 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4775 db_printf("\n"); 4776 4777 if ((entry->prev == &map->header) || 4778 (entry->prev->object.vm_object != 4779 entry->object.vm_object)) { 4780 db_indent += 2; 4781 vm_object_print((db_expr_t)(intptr_t) 4782 entry->object.vm_object, 4783 0, 0, (char *)0); 4784 db_indent -= 2; 4785 } 4786 } 4787 } 4788 db_indent -= 2; 4789 } 4790 4791 DB_SHOW_COMMAND(map, map) 4792 { 4793 4794 if (!have_addr) { 4795 db_printf("usage: show map <addr>\n"); 4796 return; 4797 } 4798 vm_map_print((vm_map_t)addr); 4799 } 4800 4801 DB_SHOW_COMMAND(procvm, procvm) 4802 { 4803 struct proc *p; 4804 4805 if (have_addr) { 4806 p = db_lookup_proc(addr); 4807 } else { 4808 p = curproc; 4809 } 4810 4811 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4812 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4813 (void *)vmspace_pmap(p->p_vmspace)); 4814 4815 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 4816 } 4817 4818 #endif /* DDB */ 4819