/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
	{						\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
	}
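
/*
 * Illustrative sketch (not part of the original sources): a caller
 * passing a range that straddles the map's bounds has both endpoints
 * silently clamped before the operation proceeds, e.g.
 *
 *	vm_offset_t start = 0, end = ~(vm_offset_t)0;
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	(now start == vm_map_min(map) and end == vm_map_max(map))
 *	vm_map_unlock(map);
 */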
/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}
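
/*
 * Usage sketch (illustrative only; error handling abbreviated).  A
 * caller creates a fresh vmspace with the default pmap initializer and
 * later drops its reference:
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(minaddr, maxaddr, NULL);
 *	if (vm == NULL)
 *		return (ENOMEM);
 *	...
 *	vmspace_free(vm);
 *
 * Here "minaddr" and "maxaddr" stand for whatever user address-space
 * bounds the caller has chosen.
 */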
static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}
void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
	vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}
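
/*
 * Typical locking pattern (illustrative sketch only): take the
 * exclusive lock around a modification; vm_map_unlock() then runs
 * vm_map_process_deferred() to dispose of any entries that were queued
 * on td_map_def_user while the lock was held:
 *
 *	vm_map_lock(map);
 *	(modify, clip, or unlink entries)
 *	vm_map_unlock(map);
 */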
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}
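
/*
 * Sketch of the intended retry pattern (illustrative only): a caller
 * that must wait records the timestamp, sleeps, relocks, and rescans
 * if the map changed while it was unlocked:
 *
 *	last_timestamp = map->timestamp;
 *	(void)vm_map_unlock_and_wait(map, 0);
 *	vm_map_lock(map);
 *	if (last_timestamp + 1 != map->timestamp)
 *		(look up the affected entry again before continuing)
 */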
/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}
/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
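
/*
 * Usage sketch (illustrative only): callers splay the tree at the
 * address of interest and then examine the new root:
 *
 *	map->root = vm_map_entry_splay(addr, map->root);
 *	if (addr >= map->root->start && addr < map->root->end)
 *		(map->root is the entry containing addr)
 *	else
 *		(map->root is an adjacent, lower or higher, entry)
 */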
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where == &map->header ||
	    after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(after_where->next == &map->header ||
	    entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ?
		    map->max_offset : entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}
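
/*
 * Sketch (illustrative only): after growing an entry in place, the
 * free-space fields must be recomputed, e.g. as in the coalescing path
 * of vm_map_insert():
 *
 *	prev_entry->end = end;
 *	vm_map_entry_resize_free(map, prev_entry);
 */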
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
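
/*
 * Usage sketch (illustrative only):
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry))
 *		(addr lies within *entry)
 *	else
 *		(*entry is the preceding entry, possibly &map->header)
 */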
/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;
	boolean_t charge_prev_obj;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((object != kmem_object && object != kernel_object) ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kmem or kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	charge_prev_obj = FALSE;
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->cred == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
		crhold(cred);
		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
			charge_prev_obj = TRUE;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
	    (prev_entry->eflags == protoeflags) &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0) &&
	    (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    (prev_entry->object.vm_object->cred == cred))) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == inheritance) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			if (cred != NULL)
				crfree(cred);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			crfree(cred);
			cred = NULL;
		}
	}

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = OFF_TO_IDX(offset);

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
		    object, OFF_TO_IDX(offset), end - start,
		    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, vm_offset_t max_addr, int find_space,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, initial_addr, start;
	int result;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	initial_addr = *addr;
again:
	start = initial_addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				vm_map_unlock(map);
				if (find_space == VMFS_OPTIMAL_SPACE) {
					find_space = VMFS_ANY_SPACE;
					goto again;
				}
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_SUPER_SPACE:
			case VMFS_OPTIMAL_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
			case VMFS_ANY_SPACE:
				break;
			default:
				if ((*addr & (alignment - 1)) != 0) {
					*addr &= ~(alignment - 1);
					*addr += alignment;
				}
				break;
			}

			start = *addr;
		}
		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
			result = vm_map_stack_locked(map, start, length,
			    sgrowsiz, prot, max, cow);
		} else {
			result = vm_map_insert(map, object, offset, start,
			    start + length, prot, max, cow);
		}
	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
	    find_space != VMFS_ANY_SPACE);
	vm_map_unlock(map);
	return (result);
}
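
/*
 * Usage sketch for vm_map_find() (illustrative only; assumes the
 * caller has already referenced "object" if it is non-NULL, and that
 * "hint" is the caller's preferred starting address):
 *
 *	vm_offset_t addr = hint;
 *	int rv;
 *
 *	rv = vm_map_find(map, object, offset, &addr, length, 0,
 *	    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 */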
/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ((prev->end == entry->start) &&
		    (prev->object.vm_object == entry->object.vm_object) &&
		    (!prev->object.vm_object ||
		    (prev->offset + prevsize == entry->offset)) &&
		    (prev->eflags == entry->eflags) &&
		    (prev->protection == entry->protection) &&
		    (prev->max_protection == entry->max_protection) &&
		    (prev->inheritance == entry->inheritance) &&
		    (prev->wired_count == entry->wired_count) &&
		    (prev->cred == entry->cred)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 *
			 * Since we count the number of virtual page
			 * mappings in object->un_pager.vnp.writemappings,
			 * the writemappings value should not be adjusted
			 * when the entry is disposed of.
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->cred != NULL)
				crfree(prev->cred);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!entry->object.vm_object ||
		    (entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->cred == entry->cred)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->cred != NULL)
				crfree(next->cred);
			vm_map_entry_dispose(map, next);
		}
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}
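
/*
 * Usage sketch (illustrative only): a range operation first clips the
 * boundary entries so that it modifies exactly [start, end), as
 * vm_map_submap() and vm_map_protect() below do:
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	...
 *	vm_map_clip_end(map, entry, end);
 */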
/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload the specified map's pmap with mappings to the specified
 *	object's memory-resident pages.  No further physical pages are
 *	allocated, and no further virtual pages are retrieved from secondary
 *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
 *	limited number of page mappings are created at the low-end of the
 *	specified address range.  (For this purpose, a superpage mapping
 *	counts as one page mapping.)  Otherwise, all resident pages within
 *	the specified address range are mapped.  Because these mappings are
 *	being created speculatively, cached pages are not reactivated and
 *	mapped.
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t start;
	vm_page_t p, p_start;
	vm_pindex_t mask, psize, threshold, tmpidx;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
		return;
	VM_OBJECT_RLOCK(object);
	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
		VM_OBJECT_RUNLOCK(object);
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
			pmap_object_init_pt(map->pmap, addr, object, pindex,
			    size);
			VM_OBJECT_WUNLOCK(object);
			return;
		}
		VM_OBJECT_LOCK_DOWNGRADE(object);
	}

	psize = atop(size);
	if (psize + pindex > object->size) {
		if (object->size < pindex) {
			VM_OBJECT_RUNLOCK(object);
			return;
		}
		psize = object->size - pindex;
	}

	start = 0;
	p_start = NULL;
	threshold = MAX_INIT_PT;

	p = vm_page_find_least(object, pindex);
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * don't allow a madvise to blow away our really
		 * free pages allocating pv entries.
		 */
		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
		    vm_cnt.v_free_count < vm_cnt.v_free_reserved) ||
		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
		    tmpidx >= threshold)) {
			psize = tmpidx;
			break;
		}
		if (p->valid == VM_PAGE_BITS_ALL) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
			/* Jump ahead if a superpage mapping is possible. */
			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
			    (pagesizes[p->psind] - 1)) == 0) {
				mask = atop(pagesizes[p->psind]) - 1;
				if (tmpidx + mask < psize &&
				    vm_page_ps_is_valid(p)) {
					p += mask;
					threshold += mask;
				}
			}
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
	VM_OBJECT_RUNLOCK(object);
}
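
/*
 * Sketch (illustrative only): vm_map_insert() above uses this routine
 * to prefault a new mapping when requested:
 *
 *	if (cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL))
 *		vm_map_pmap_enter(map, start, prot, object,
 *		    OFF_TO_IDX(offset), end - start,
 *		    cow & MAP_PREFAULT_PARTIAL);
 */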
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Do an accounting pass for private read-only mappings that
	 * now will do cow due to allowed write (e.g. debugger sets
	 * breakpoint on text segment)
	 */
	for (current = entry; (current != &map->header) &&
	    (current->start < end); current = current->next) {

		vm_map_clip_end(map, current, end);

		if (set_max ||
		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
		    ENTRY_CHARGED(current)) {
			continue;
		}

		cred = curthread->td_ucred;
		obj = current->object.vm_object;

		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
			if (!swap_reserve(current->end - current->start)) {
				vm_map_unlock(map);
				return (KERN_RESOURCE_SHORTAGE);
			}
			crhold(cred);
			current->cred = cred;
			continue;
		}

		VM_OBJECT_WLOCK(obj);
		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
			VM_OBJECT_WUNLOCK(obj);
			continue;
		}

		/*
		 * Charge for the whole object allocation now, since
		 * we cannot distinguish between non-charged and
		 * charged clipped mapping of the same object later.
		 */
		KASSERT(obj->charge == 0,
		    ("vm_map_protect: object %p overcharged (entry %p)",
		    obj, current));
		if (!swap_reserve(ptoa(obj->size))) {
			VM_OBJECT_WUNLOCK(obj);
			vm_map_unlock(map);
			return (KERN_RESOURCE_SHORTAGE);
		}

		crhold(cred);
		obj->cred = cred;
		obj->charge = ptoa(obj->size);
		VM_OBJECT_WUNLOCK(obj);
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		old_prot = current->protection;

		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * For user wired map entries, the normal lazy evaluation of
		 * write access upgrades through soft page faults is
		 * undesirable.  Instead, immediately copy any pages that are
		 * copy-on-write and enable write access in the physical map.
		 */
		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
		    (current->protection & VM_PROT_WRITE) != 0 &&
		    (old_prot & VM_PROT_WRITE) == 0)
			vm_fault_copy_entry(map, map, current, current, NULL);

		/*
		 * When restricting access, update the physical map.  Worry
		 * about copy-on-write here.
		 */
		if ((old_prot & ~current->protection) != 0) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_madvise:
 *
 * This routine traverses the process's map, handling the madvise
 * system call.
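 *
 * (Illustrative call, a sketch rather than a quote of the syscall
 * layer: the MADV_WILLNEED case of madvise(2) reduces to
 *
 *	(void) vm_map_madvise(&p->p_vmspace->vm_map, start, end,
 *	    MADV_WILLNEED);
 *
 * where "p" stands in for the calling process.)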
 * Advisories are classified as either those affecting the vm_map_entry
 * structure or those affecting the underlying objects.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		if (start == end)
			return (KERN_SUCCESS);
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		if (start == end)
			return (KERN_SUCCESS);
		vm_map_lock_read(map);
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pstart, pend;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
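		 *
		 * (A worked sketch of the clipping below: for an entry
		 * spanning [e_start, e_end) at object offset "offset",
		 * the advised index range comes out as
		 *
		 *	pstart = OFF_TO_IDX(offset) +
		 *	    atop(MAX(start, e_start) - e_start);
		 *	pend   = OFF_TO_IDX(offset) +
		 *	    atop(MIN(end, e_end) - e_start);
		 *
		 * with e_start, e_end, and offset standing in for the
		 * current entry's fields.)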
2157 */ 2158 for (current = entry; 2159 (current != &map->header) && (current->start < end); 2160 current = current->next 2161 ) { 2162 vm_offset_t useEnd, useStart; 2163 2164 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2165 continue; 2166 2167 pstart = OFF_TO_IDX(current->offset); 2168 pend = pstart + atop(current->end - current->start); 2169 useStart = current->start; 2170 useEnd = current->end; 2171 2172 if (current->start < start) { 2173 pstart += atop(start - current->start); 2174 useStart = start; 2175 } 2176 if (current->end > end) { 2177 pend -= atop(current->end - end); 2178 useEnd = end; 2179 } 2180 2181 if (pstart >= pend) 2182 continue; 2183 2184 /* 2185 * Perform the pmap_advise() before clearing 2186 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2187 * concurrent pmap operation, such as pmap_remove(), 2188 * could clear a reference in the pmap and set 2189 * PGA_REFERENCED on the page before the pmap_advise() 2190 * had completed. Consequently, the page would appear 2191 * referenced based upon an old reference that 2192 * occurred before this pmap_advise() ran. 2193 */ 2194 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2195 pmap_advise(map->pmap, useStart, useEnd, 2196 behav); 2197 2198 vm_object_madvise(current->object.vm_object, pstart, 2199 pend, behav); 2200 if (behav == MADV_WILLNEED) { 2201 vm_map_pmap_enter(map, 2202 useStart, 2203 current->protection, 2204 current->object.vm_object, 2205 pstart, 2206 ptoa(pend - pstart), 2207 MAP_PREFAULT_MADVISE 2208 ); 2209 } 2210 } 2211 vm_map_unlock_read(map); 2212 } 2213 return (0); 2214 } 2215 2216 2217 /* 2218 * vm_map_inherit: 2219 * 2220 * Sets the inheritance of the specified address 2221 * range in the target map. Inheritance 2222 * affects how the map will be shared with 2223 * child maps at the time of vmspace_fork. 2224 */ 2225 int 2226 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2227 vm_inherit_t new_inheritance) 2228 { 2229 vm_map_entry_t entry; 2230 vm_map_entry_t temp_entry; 2231 2232 switch (new_inheritance) { 2233 case VM_INHERIT_NONE: 2234 case VM_INHERIT_COPY: 2235 case VM_INHERIT_SHARE: 2236 break; 2237 default: 2238 return (KERN_INVALID_ARGUMENT); 2239 } 2240 if (start == end) 2241 return (KERN_SUCCESS); 2242 vm_map_lock(map); 2243 VM_MAP_RANGE_CHECK(map, start, end); 2244 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2245 entry = temp_entry; 2246 vm_map_clip_start(map, entry, start); 2247 } else 2248 entry = temp_entry->next; 2249 while ((entry != &map->header) && (entry->start < end)) { 2250 vm_map_clip_end(map, entry, end); 2251 entry->inheritance = new_inheritance; 2252 vm_map_simplify_entry(map, entry); 2253 entry = entry->next; 2254 } 2255 vm_map_unlock(map); 2256 return (KERN_SUCCESS); 2257 } 2258 2259 /* 2260 * vm_map_unwire: 2261 * 2262 * Implements both kernel and user unwiring. 2263 */ 2264 int 2265 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2266 int flags) 2267 { 2268 vm_map_entry_t entry, first_entry, tmp_entry; 2269 vm_offset_t saved_start; 2270 unsigned int last_timestamp; 2271 int rv; 2272 boolean_t need_wakeup, result, user_unwire; 2273 2274 if (start == end) 2275 return (KERN_SUCCESS); 2276 user_unwire = (flags & VM_MAP_WIRE_USER) ? 
TRUE : FALSE; 2277 vm_map_lock(map); 2278 VM_MAP_RANGE_CHECK(map, start, end); 2279 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2280 if (flags & VM_MAP_WIRE_HOLESOK) 2281 first_entry = first_entry->next; 2282 else { 2283 vm_map_unlock(map); 2284 return (KERN_INVALID_ADDRESS); 2285 } 2286 } 2287 last_timestamp = map->timestamp; 2288 entry = first_entry; 2289 while (entry != &map->header && entry->start < end) { 2290 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2291 /* 2292 * We have not yet clipped the entry. 2293 */ 2294 saved_start = (start >= entry->start) ? start : 2295 entry->start; 2296 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2297 if (vm_map_unlock_and_wait(map, 0)) { 2298 /* 2299 * Allow interruption of user unwiring? 2300 */ 2301 } 2302 vm_map_lock(map); 2303 if (last_timestamp+1 != map->timestamp) { 2304 /* 2305 * Look again for the entry because the map was 2306 * modified while it was unlocked. 2307 * Specifically, the entry may have been 2308 * clipped, merged, or deleted. 2309 */ 2310 if (!vm_map_lookup_entry(map, saved_start, 2311 &tmp_entry)) { 2312 if (flags & VM_MAP_WIRE_HOLESOK) 2313 tmp_entry = tmp_entry->next; 2314 else { 2315 if (saved_start == start) { 2316 /* 2317 * First_entry has been deleted. 2318 */ 2319 vm_map_unlock(map); 2320 return (KERN_INVALID_ADDRESS); 2321 } 2322 end = saved_start; 2323 rv = KERN_INVALID_ADDRESS; 2324 goto done; 2325 } 2326 } 2327 if (entry == first_entry) 2328 first_entry = tmp_entry; 2329 else 2330 first_entry = NULL; 2331 entry = tmp_entry; 2332 } 2333 last_timestamp = map->timestamp; 2334 continue; 2335 } 2336 vm_map_clip_start(map, entry, start); 2337 vm_map_clip_end(map, entry, end); 2338 /* 2339 * Mark the entry in case the map lock is released. (See 2340 * above.) 2341 */ 2342 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2343 entry->wiring_thread == NULL, 2344 ("owned map entry %p", entry)); 2345 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2346 entry->wiring_thread = curthread; 2347 /* 2348 * Check the map for holes in the specified region. 2349 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2350 */ 2351 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2352 (entry->end < end && (entry->next == &map->header || 2353 entry->next->start > entry->end))) { 2354 end = entry->end; 2355 rv = KERN_INVALID_ADDRESS; 2356 goto done; 2357 } 2358 /* 2359 * If system unwiring, require that the entry is system wired. 2360 */ 2361 if (!user_unwire && 2362 vm_map_entry_system_wired_count(entry) == 0) { 2363 end = entry->end; 2364 rv = KERN_INVALID_ARGUMENT; 2365 goto done; 2366 } 2367 entry = entry->next; 2368 } 2369 rv = KERN_SUCCESS; 2370 done: 2371 need_wakeup = FALSE; 2372 if (first_entry == NULL) { 2373 result = vm_map_lookup_entry(map, start, &first_entry); 2374 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2375 first_entry = first_entry->next; 2376 else 2377 KASSERT(result, ("vm_map_unwire: lookup failed")); 2378 } 2379 for (entry = first_entry; entry != &map->header && entry->start < end; 2380 entry = entry->next) { 2381 /* 2382 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2383 * space in the unwired region could have been mapped 2384 * while the map lock was dropped for draining 2385 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2386 * could be simultaneously wiring this new mapping 2387 * entry. Detect these cases and skip any entries 2388 * marked as in transition by us. 
2389 */ 2390 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2391 entry->wiring_thread != curthread) { 2392 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2393 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2394 continue; 2395 } 2396 2397 if (rv == KERN_SUCCESS && (!user_unwire || 2398 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2399 if (user_unwire) 2400 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2401 entry->wired_count--; 2402 if (entry->wired_count == 0) { 2403 /* 2404 * Retain the map lock. 2405 */ 2406 vm_fault_unwire(map, entry->start, entry->end, 2407 entry->object.vm_object != NULL && 2408 (entry->object.vm_object->flags & 2409 OBJ_FICTITIOUS) != 0); 2410 } 2411 } 2412 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2413 ("vm_map_unwire: in-transition flag missing %p", entry)); 2414 KASSERT(entry->wiring_thread == curthread, 2415 ("vm_map_unwire: alien wire %p", entry)); 2416 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2417 entry->wiring_thread = NULL; 2418 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2419 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2420 need_wakeup = TRUE; 2421 } 2422 vm_map_simplify_entry(map, entry); 2423 } 2424 vm_map_unlock(map); 2425 if (need_wakeup) 2426 vm_map_wakeup(map); 2427 return (rv); 2428 } 2429 2430 /* 2431 * vm_map_wire: 2432 * 2433 * Implements both kernel and user wiring. 2434 */ 2435 int 2436 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2437 int flags) 2438 { 2439 vm_map_entry_t entry, first_entry, tmp_entry; 2440 vm_offset_t saved_end, saved_start; 2441 unsigned int last_timestamp; 2442 int rv; 2443 boolean_t fictitious, need_wakeup, result, user_wire; 2444 vm_prot_t prot; 2445 2446 if (start == end) 2447 return (KERN_SUCCESS); 2448 prot = 0; 2449 if (flags & VM_MAP_WIRE_WRITE) 2450 prot |= VM_PROT_WRITE; 2451 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2452 vm_map_lock(map); 2453 VM_MAP_RANGE_CHECK(map, start, end); 2454 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2455 if (flags & VM_MAP_WIRE_HOLESOK) 2456 first_entry = first_entry->next; 2457 else { 2458 vm_map_unlock(map); 2459 return (KERN_INVALID_ADDRESS); 2460 } 2461 } 2462 last_timestamp = map->timestamp; 2463 entry = first_entry; 2464 while (entry != &map->header && entry->start < end) { 2465 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2466 /* 2467 * We have not yet clipped the entry. 2468 */ 2469 saved_start = (start >= entry->start) ? start : 2470 entry->start; 2471 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2472 if (vm_map_unlock_and_wait(map, 0)) { 2473 /* 2474 * Allow interruption of user wiring? 2475 */ 2476 } 2477 vm_map_lock(map); 2478 if (last_timestamp + 1 != map->timestamp) { 2479 /* 2480 * Look again for the entry because the map was 2481 * modified while it was unlocked. 2482 * Specifically, the entry may have been 2483 * clipped, merged, or deleted. 2484 */ 2485 if (!vm_map_lookup_entry(map, saved_start, 2486 &tmp_entry)) { 2487 if (flags & VM_MAP_WIRE_HOLESOK) 2488 tmp_entry = tmp_entry->next; 2489 else { 2490 if (saved_start == start) { 2491 /* 2492 * first_entry has been deleted. 
2493 */ 2494 vm_map_unlock(map); 2495 return (KERN_INVALID_ADDRESS); 2496 } 2497 end = saved_start; 2498 rv = KERN_INVALID_ADDRESS; 2499 goto done; 2500 } 2501 } 2502 if (entry == first_entry) 2503 first_entry = tmp_entry; 2504 else 2505 first_entry = NULL; 2506 entry = tmp_entry; 2507 } 2508 last_timestamp = map->timestamp; 2509 continue; 2510 } 2511 vm_map_clip_start(map, entry, start); 2512 vm_map_clip_end(map, entry, end); 2513 /* 2514 * Mark the entry in case the map lock is released. (See 2515 * above.) 2516 */ 2517 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2518 entry->wiring_thread == NULL, 2519 ("owned map entry %p", entry)); 2520 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2521 entry->wiring_thread = curthread; 2522 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2523 || (entry->protection & prot) != prot) { 2524 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2525 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2526 end = entry->end; 2527 rv = KERN_INVALID_ADDRESS; 2528 goto done; 2529 } 2530 goto next_entry; 2531 } 2532 if (entry->wired_count == 0) { 2533 entry->wired_count++; 2534 saved_start = entry->start; 2535 saved_end = entry->end; 2536 fictitious = entry->object.vm_object != NULL && 2537 (entry->object.vm_object->flags & 2538 OBJ_FICTITIOUS) != 0; 2539 /* 2540 * Release the map lock, relying on the in-transition 2541 * mark. Mark the map busy for fork. 2542 */ 2543 vm_map_busy(map); 2544 vm_map_unlock(map); 2545 rv = vm_fault_wire(map, saved_start, saved_end, 2546 fictitious); 2547 vm_map_lock(map); 2548 vm_map_unbusy(map); 2549 if (last_timestamp + 1 != map->timestamp) { 2550 /* 2551 * Look again for the entry because the map was 2552 * modified while it was unlocked. The entry 2553 * may have been clipped, but NOT merged or 2554 * deleted. 2555 */ 2556 result = vm_map_lookup_entry(map, saved_start, 2557 &tmp_entry); 2558 KASSERT(result, ("vm_map_wire: lookup failed")); 2559 if (entry == first_entry) 2560 first_entry = tmp_entry; 2561 else 2562 first_entry = NULL; 2563 entry = tmp_entry; 2564 while (entry->end < saved_end) { 2565 if (rv != KERN_SUCCESS) { 2566 KASSERT(entry->wired_count == 1, 2567 ("vm_map_wire: bad count")); 2568 entry->wired_count = -1; 2569 } 2570 entry = entry->next; 2571 } 2572 } 2573 last_timestamp = map->timestamp; 2574 if (rv != KERN_SUCCESS) { 2575 KASSERT(entry->wired_count == 1, 2576 ("vm_map_wire: bad count")); 2577 /* 2578 * Assign an out-of-range value to represent 2579 * the failure to wire this entry. 2580 */ 2581 entry->wired_count = -1; 2582 end = entry->end; 2583 goto done; 2584 } 2585 } else if (!user_wire || 2586 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2587 entry->wired_count++; 2588 } 2589 /* 2590 * Check the map for holes in the specified region. 2591 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
2592 */ 2593 next_entry: 2594 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2595 (entry->end < end && (entry->next == &map->header || 2596 entry->next->start > entry->end))) { 2597 end = entry->end; 2598 rv = KERN_INVALID_ADDRESS; 2599 goto done; 2600 } 2601 entry = entry->next; 2602 } 2603 rv = KERN_SUCCESS; 2604 done: 2605 need_wakeup = FALSE; 2606 if (first_entry == NULL) { 2607 result = vm_map_lookup_entry(map, start, &first_entry); 2608 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2609 first_entry = first_entry->next; 2610 else 2611 KASSERT(result, ("vm_map_wire: lookup failed")); 2612 } 2613 for (entry = first_entry; entry != &map->header && entry->start < end; 2614 entry = entry->next) { 2615 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2616 goto next_entry_done; 2617 2618 /* 2619 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2620 * space in the unwired region could have been mapped 2621 * while the map lock was dropped for faulting in the 2622 * pages or draining MAP_ENTRY_IN_TRANSITION. 2623 * Moreover, another thread could be simultaneously 2624 * wiring this new mapping entry. Detect these cases 2625 * and skip any entries marked as in transition by us. 2626 */ 2627 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2628 entry->wiring_thread != curthread) { 2629 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2630 ("vm_map_wire: !HOLESOK and new/changed entry")); 2631 continue; 2632 } 2633 2634 if (rv == KERN_SUCCESS) { 2635 if (user_wire) 2636 entry->eflags |= MAP_ENTRY_USER_WIRED; 2637 } else if (entry->wired_count == -1) { 2638 /* 2639 * Wiring failed on this entry. Thus, unwiring is 2640 * unnecessary. 2641 */ 2642 entry->wired_count = 0; 2643 } else { 2644 if (!user_wire || 2645 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 2646 entry->wired_count--; 2647 if (entry->wired_count == 0) { 2648 /* 2649 * Retain the map lock. 2650 */ 2651 vm_fault_unwire(map, entry->start, entry->end, 2652 entry->object.vm_object != NULL && 2653 (entry->object.vm_object->flags & 2654 OBJ_FICTITIOUS) != 0); 2655 } 2656 } 2657 next_entry_done: 2658 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2659 ("vm_map_wire: in-transition flag missing %p", entry)); 2660 KASSERT(entry->wiring_thread == curthread, 2661 ("vm_map_wire: alien wire %p", entry)); 2662 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2663 MAP_ENTRY_WIRE_SKIPPED); 2664 entry->wiring_thread = NULL; 2665 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2666 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2667 need_wakeup = TRUE; 2668 } 2669 vm_map_simplify_entry(map, entry); 2670 } 2671 vm_map_unlock(map); 2672 if (need_wakeup) 2673 vm_map_wakeup(map); 2674 return (rv); 2675 } 2676 2677 /* 2678 * vm_map_sync 2679 * 2680 * Push any dirty cached pages in the address range to their pager. 2681 * If syncio is TRUE, dirty pages are written synchronously. 2682 * If invalidate is TRUE, any cached pages are freed as well. 2683 * 2684 * If the size of the region from start to end is zero, we are 2685 * supposed to flush all modified pages within the region containing 2686 * start. Unfortunately, a region can be split or coalesced with 2687 * neighboring regions, making it difficult to determine what the 2688 * original region was. Therefore, we approximate this requirement by 2689 * flushing the current region containing start. 2690 * 2691 * Returns an error if any part of the specified range is not mapped. 
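 *
 * A minimal usage sketch (a hypothetical msync(2)-style caller, not
 * code from this file):
 *
 *	rv = vm_map_sync(map, start, end, TRUE, TRUE);
 *
 * pushes dirty pages in [start, end) to their pager synchronously,
 * frees the cached copies, and yields KERN_INVALID_ADDRESS if part of
 * the range is unmapped.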
 */
int
vm_map_sync(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;
	unsigned int last_timestamp;
	boolean_t failed;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	} else if (start == end) {
		start = entry->start;
		end = entry->end;
	}
	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
	for (current = entry; current != &map->header && current->start < end;
	    current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		    current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate)
		pmap_remove(map->pmap, start, end);
	failed = FALSE;

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current != &map->header && current->start < end;) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		vm_object_reference(object);
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);
		if (!vm_object_sync(object, offset, size, syncio, invalidate))
			failed = TRUE;
		start += size;
		vm_object_deallocate(object);
		vm_map_lock_read(map);
		if (last_timestamp == map->timestamp ||
		    !vm_map_lookup_entry(map, start, &current))
			current = current->next;
	}

	vm_map_unlock_read(map);
	return (failed ? KERN_FAILURE : KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 * Make the region specified by this entry pageable.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
	vm_fault_unwire(map, entry->start, entry->end,
	    entry->object.vm_object != NULL &&
	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
	entry->wired_count = 0;
}

static void
vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
{

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
		vm_object_deallocate(entry->object.vm_object);
	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 * Deallocate the given entry from the target map.
2810 */ 2811 static void 2812 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2813 { 2814 vm_object_t object; 2815 vm_pindex_t offidxstart, offidxend, count, size1; 2816 vm_ooffset_t size; 2817 2818 vm_map_entry_unlink(map, entry); 2819 object = entry->object.vm_object; 2820 size = entry->end - entry->start; 2821 map->size -= size; 2822 2823 if (entry->cred != NULL) { 2824 swap_release_by_cred(size, entry->cred); 2825 crfree(entry->cred); 2826 } 2827 2828 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2829 (object != NULL)) { 2830 KASSERT(entry->cred == NULL || object->cred == NULL || 2831 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 2832 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 2833 count = OFF_TO_IDX(size); 2834 offidxstart = OFF_TO_IDX(entry->offset); 2835 offidxend = offidxstart + count; 2836 VM_OBJECT_WLOCK(object); 2837 if (object->ref_count != 1 && 2838 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2839 object == kernel_object || object == kmem_object)) { 2840 vm_object_collapse(object); 2841 2842 /* 2843 * The option OBJPR_NOTMAPPED can be passed here 2844 * because vm_map_delete() already performed 2845 * pmap_remove() on the only mapping to this range 2846 * of pages. 2847 */ 2848 vm_object_page_remove(object, offidxstart, offidxend, 2849 OBJPR_NOTMAPPED); 2850 if (object->type == OBJT_SWAP) 2851 swap_pager_freespace(object, offidxstart, count); 2852 if (offidxend >= object->size && 2853 offidxstart < object->size) { 2854 size1 = object->size; 2855 object->size = offidxstart; 2856 if (object->cred != NULL) { 2857 size1 -= object->size; 2858 KASSERT(object->charge >= ptoa(size1), 2859 ("vm_map_entry_delete: object->charge < 0")); 2860 swap_release_by_cred(ptoa(size1), object->cred); 2861 object->charge -= ptoa(size1); 2862 } 2863 } 2864 } 2865 VM_OBJECT_WUNLOCK(object); 2866 } else 2867 entry->object.vm_object = NULL; 2868 if (map->system_map) 2869 vm_map_entry_deallocate(entry, TRUE); 2870 else { 2871 entry->next = curthread->td_map_def_user; 2872 curthread->td_map_def_user = entry; 2873 } 2874 } 2875 2876 /* 2877 * vm_map_delete: [ internal use only ] 2878 * 2879 * Deallocates the given address range from the target 2880 * map. 2881 */ 2882 int 2883 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2884 { 2885 vm_map_entry_t entry; 2886 vm_map_entry_t first_entry; 2887 2888 VM_MAP_ASSERT_LOCKED(map); 2889 if (start == end) 2890 return (KERN_SUCCESS); 2891 2892 /* 2893 * Find the start of the region, and clip it 2894 */ 2895 if (!vm_map_lookup_entry(map, start, &first_entry)) 2896 entry = first_entry->next; 2897 else { 2898 entry = first_entry; 2899 vm_map_clip_start(map, entry, start); 2900 } 2901 2902 /* 2903 * Step through all entries in this region 2904 */ 2905 while ((entry != &map->header) && (entry->start < end)) { 2906 vm_map_entry_t next; 2907 2908 /* 2909 * Wait for wiring or unwiring of an entry to complete. 2910 * Also wait for any system wirings to disappear on 2911 * user maps. 
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
		    (vm_map_pmap(map) != kernel_pmap &&
		    vm_map_entry_system_wired_count(entry) != 0)) {
			unsigned int last_timestamp;
			vm_offset_t saved_start;
			vm_map_entry_t tmp_entry;

			saved_start = entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			last_timestamp = map->timestamp;
			(void) vm_map_unlock_and_wait(map, 0);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry))
					entry = tmp_entry->next;
				else {
					entry = tmp_entry;
					vm_map_clip_start(map, entry,
					    saved_start);
				}
			}
			continue;
		}
		vm_map_clip_end(map, entry, end);

		next = entry->next;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		pmap_remove(map->pmap, entry->start, entry->end);

		/*
		 * Delete the entry only after removing all pmap
		 * entries pointing to its pages.  (Otherwise, its
		 * page frames may be reallocated, and any modify bits
		 * will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);
	return (result);
}

/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified privilege on the
 * entire address region given.  The entire region must be allocated.
 *
 * WARNING!  This code does not and should not check whether the
 * contents of the region are accessible.  For example a smaller file
 * might be mapped into a larger address space.
 *
 * NOTE!  This code is also called by munmap().
 *
 * The map must be locked.  A read lock is sufficient.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry))
		return (FALSE);
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header)
			return (FALSE);
		/*
		 * No holes allowed!
		 */
		if (start < entry->start)
			return (FALSE);
		/*
		 * Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection)
			return (FALSE);
		/* go to next entry */
		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
3037 */ 3038 static void 3039 vm_map_copy_entry( 3040 vm_map_t src_map, 3041 vm_map_t dst_map, 3042 vm_map_entry_t src_entry, 3043 vm_map_entry_t dst_entry, 3044 vm_ooffset_t *fork_charge) 3045 { 3046 vm_object_t src_object; 3047 vm_map_entry_t fake_entry; 3048 vm_offset_t size; 3049 struct ucred *cred; 3050 int charged; 3051 3052 VM_MAP_ASSERT_LOCKED(dst_map); 3053 3054 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3055 return; 3056 3057 if (src_entry->wired_count == 0 || 3058 (src_entry->protection & VM_PROT_WRITE) == 0) { 3059 /* 3060 * If the source entry is marked needs_copy, it is already 3061 * write-protected. 3062 */ 3063 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3064 (src_entry->protection & VM_PROT_WRITE) != 0) { 3065 pmap_protect(src_map->pmap, 3066 src_entry->start, 3067 src_entry->end, 3068 src_entry->protection & ~VM_PROT_WRITE); 3069 } 3070 3071 /* 3072 * Make a copy of the object. 3073 */ 3074 size = src_entry->end - src_entry->start; 3075 if ((src_object = src_entry->object.vm_object) != NULL) { 3076 VM_OBJECT_WLOCK(src_object); 3077 charged = ENTRY_CHARGED(src_entry); 3078 if ((src_object->handle == NULL) && 3079 (src_object->type == OBJT_DEFAULT || 3080 src_object->type == OBJT_SWAP)) { 3081 vm_object_collapse(src_object); 3082 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3083 vm_object_split(src_entry); 3084 src_object = src_entry->object.vm_object; 3085 } 3086 } 3087 vm_object_reference_locked(src_object); 3088 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3089 if (src_entry->cred != NULL && 3090 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3091 KASSERT(src_object->cred == NULL, 3092 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3093 src_object)); 3094 src_object->cred = src_entry->cred; 3095 src_object->charge = size; 3096 } 3097 VM_OBJECT_WUNLOCK(src_object); 3098 dst_entry->object.vm_object = src_object; 3099 if (charged) { 3100 cred = curthread->td_ucred; 3101 crhold(cred); 3102 dst_entry->cred = cred; 3103 *fork_charge += size; 3104 if (!(src_entry->eflags & 3105 MAP_ENTRY_NEEDS_COPY)) { 3106 crhold(cred); 3107 src_entry->cred = cred; 3108 *fork_charge += size; 3109 } 3110 } 3111 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 3112 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 3113 dst_entry->offset = src_entry->offset; 3114 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3115 /* 3116 * MAP_ENTRY_VN_WRITECNT cannot 3117 * indicate write reference from 3118 * src_entry, since the entry is 3119 * marked as needs copy. Allocate a 3120 * fake entry that is used to 3121 * decrement object->un_pager.vnp.writecount 3122 * at the appropriate time. Attach 3123 * fake_entry to the deferred list. 
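				 *
				 * (Sketch of the other half, assuming the
				 * usual deferred protocol: a later
				 * vm_map_process_deferred() walks
				 * td_map_def_user and, for entries marked
				 * MAP_ENTRY_VN_WRITECNT, releases the
				 * writecount over [start, end) before
				 * freeing the fake entry.)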
3124 */ 3125 fake_entry = vm_map_entry_create(dst_map); 3126 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3127 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3128 vm_object_reference(src_object); 3129 fake_entry->object.vm_object = src_object; 3130 fake_entry->start = src_entry->start; 3131 fake_entry->end = src_entry->end; 3132 fake_entry->next = curthread->td_map_def_user; 3133 curthread->td_map_def_user = fake_entry; 3134 } 3135 } else { 3136 dst_entry->object.vm_object = NULL; 3137 dst_entry->offset = 0; 3138 if (src_entry->cred != NULL) { 3139 dst_entry->cred = curthread->td_ucred; 3140 crhold(dst_entry->cred); 3141 *fork_charge += size; 3142 } 3143 } 3144 3145 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 3146 dst_entry->end - dst_entry->start, src_entry->start); 3147 } else { 3148 /* 3149 * We don't want to make writeable wired pages copy-on-write. 3150 * Immediately copy these pages into the new map by simulating 3151 * page faults. The new pages are pageable. 3152 */ 3153 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3154 fork_charge); 3155 } 3156 } 3157 3158 /* 3159 * vmspace_map_entry_forked: 3160 * Update the newly-forked vmspace each time a map entry is inherited 3161 * or copied. The values for vm_dsize and vm_tsize are approximate 3162 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3163 */ 3164 static void 3165 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3166 vm_map_entry_t entry) 3167 { 3168 vm_size_t entrysize; 3169 vm_offset_t newend; 3170 3171 entrysize = entry->end - entry->start; 3172 vm2->vm_map.size += entrysize; 3173 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3174 vm2->vm_ssize += btoc(entrysize); 3175 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3176 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3177 newend = MIN(entry->end, 3178 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3179 vm2->vm_dsize += btoc(newend - entry->start); 3180 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3181 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3182 newend = MIN(entry->end, 3183 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3184 vm2->vm_tsize += btoc(newend - entry->start); 3185 } 3186 } 3187 3188 /* 3189 * vmspace_fork: 3190 * Create a new process vmspace structure and vm_map 3191 * based on those of an existing process. The new map 3192 * is based on the old map, according to the inheritance 3193 * values on the regions in that map. 3194 * 3195 * XXX It might be worth coalescing the entries added to the new vmspace. 3196 * 3197 * The source map must not be locked. 3198 */ 3199 struct vmspace * 3200 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3201 { 3202 struct vmspace *vm2; 3203 vm_map_t new_map, old_map; 3204 vm_map_entry_t new_entry, old_entry; 3205 vm_object_t object; 3206 int locked; 3207 3208 old_map = &vm1->vm_map; 3209 /* Copy immutable fields of vm1 to vm2. 
 */
	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
	if (vm2 == NULL)
		return (NULL);
	vm2->vm_taddr = vm1->vm_taddr;
	vm2->vm_daddr = vm1->vm_daddr;
	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
	vm_map_lock(old_map);
	if (old_map->busy)
		vm_map_wait_busy(old_map);
	new_map = &vm2->vm_map;
	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
	KASSERT(locked, ("vmspace_fork: lock failed"));

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if
			 * necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = 0;
				if (old_entry->cred != NULL) {
					object->cred = old_entry->cred;
					object->charge = old_entry->end -
					    old_entry->start;
					old_entry->cred = NULL;
				}
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    old_entry->end - old_entry->start);
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);

				/*
				 * As in vm_map_simplify_entry(), the
				 * vnode lock will not be acquired in
				 * this call to vm_object_deallocate().
				 */
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			VM_OBJECT_WLOCK(object);
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
			if (old_entry->cred != NULL) {
				KASSERT(object->cred == NULL,
				    ("vmspace_fork both cred"));
				object->cred = old_entry->cred;
				object->charge = old_entry->end -
				    old_entry->start;
				old_entry->cred = NULL;
			}

			/*
			 * Assert the correct state of the vnode
			 * v_writecount while the object is locked, so
			 * that it need not be relocked later just for
			 * the assertion.
			 */
			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
			    object->type == OBJT_VNODE) {
				KASSERT(((struct vnode *)object->handle)->
				    v_writecount > 0,
				    ("vmspace_fork: v_writecount %p", object));
				KASSERT(object->un_pager.vnp.writemappings > 0,
				    ("vmspace_fork: vnp.writecount %p",
				    object));
			}
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
				vnode_pager_update_writecount(object,
				    new_entry->start, new_entry->end);
			}

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);

			/*
			 * Update the physical map
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			/*
			 * Copied entry is COW over the old object.
			 */
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->cred = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry, fork_charge);
			break;
		}
		old_entry = old_entry->next;
	}
	/*
	 * Use inlined vm_map_unlock() to postpone handling the deferred
	 * map entries, which cannot be done until both old_map and
	 * new_map locks are released.
	 */
	sx_xunlock(&old_map->lock);
	sx_xunlock(&new_map->lock);
	vm_map_process_deferred();

	return (vm2);
}

int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_size_t growsize, init_ssize;
	rlim_t lmemlim, vmemlim;
	int rv;

	growsize = sgrowsiz;
	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
	vm_map_lock(map);
	PROC_LOCK(curproc);
	lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
	vmemlim = lim_cur(curproc, RLIMIT_VMEM);
	PROC_UNLOCK(curproc);
	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
			rv = KERN_NO_SPACE;
			goto out;
		}
	}
	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
	    max, cow);
out:
	vm_map_unlock(map);
	return (rv);
}

static int
vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	vm_offset_t bot, top;
	vm_size_t init_ssize;
	int orient, rv;

	/*
	 * The stack orientation is piggybacked with the cow argument.
	 * Extract it into orient and mask the cow argument so that we
	 * don't pass it around further.
	 * NOTE: We explicitly allow bi-directional stacks.
	 */
	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
	KASSERT(orient != 0, ("No stack grow direction"));

	if (addrbos < vm_map_min(map) ||
	    addrbos > vm_map_max(map) ||
	    addrbos + max_ssize < addrbos)
		return (KERN_NO_SPACE);

	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * If we can't accommodate max_ssize in the current mapping, no go.
3430 * However, we need to be aware that subsequent user mappings might 3431 * map into the space we have reserved for stack, and currently this 3432 * space is not protected. 3433 * 3434 * Hopefully we will at least detect this condition when we try to 3435 * grow the stack. 3436 */ 3437 if ((prev_entry->next != &map->header) && 3438 (prev_entry->next->start < addrbos + max_ssize)) 3439 return (KERN_NO_SPACE); 3440 3441 /* 3442 * We initially map a stack of only init_ssize. We will grow as 3443 * needed later. Depending on the orientation of the stack (i.e. 3444 * the grow direction) we either map at the top of the range, the 3445 * bottom of the range or in the middle. 3446 * 3447 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3448 * and cow to be 0. Possibly we should eliminate these as input 3449 * parameters, and just pass these values here in the insert call. 3450 */ 3451 if (orient == MAP_STACK_GROWS_DOWN) 3452 bot = addrbos + max_ssize - init_ssize; 3453 else if (orient == MAP_STACK_GROWS_UP) 3454 bot = addrbos; 3455 else 3456 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 3457 top = bot + init_ssize; 3458 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3459 3460 /* Now set the avail_ssize amount. */ 3461 if (rv == KERN_SUCCESS) { 3462 new_entry = prev_entry->next; 3463 if (new_entry->end != top || new_entry->start != bot) 3464 panic("Bad entry start/end for new stack entry"); 3465 3466 new_entry->avail_ssize = max_ssize - init_ssize; 3467 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3468 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3469 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3470 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3471 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3472 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3473 } 3474 3475 return (rv); 3476 } 3477 3478 static int stack_guard_page = 0; 3479 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page); 3480 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW, 3481 &stack_guard_page, 0, 3482 "Insert stack guard page ahead of the growable segments."); 3483 3484 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3485 * desired address is already mapped, or if we successfully grow 3486 * the stack. Also returns KERN_SUCCESS if addr is outside the 3487 * stack range (this is strange, but preserves compatibility with 3488 * the grow function in vm_machdep.c). 3489 */ 3490 int 3491 vm_map_growstack(struct proc *p, vm_offset_t addr) 3492 { 3493 vm_map_entry_t next_entry, prev_entry; 3494 vm_map_entry_t new_entry, stack_entry; 3495 struct vmspace *vm = p->p_vmspace; 3496 vm_map_t map = &vm->vm_map; 3497 vm_offset_t end; 3498 vm_size_t growsize; 3499 size_t grow_amount, max_grow; 3500 rlim_t lmemlim, stacklim, vmemlim; 3501 int is_procstack, rv; 3502 struct ucred *cred; 3503 #ifdef notyet 3504 uint64_t limit; 3505 #endif 3506 #ifdef RACCT 3507 int error; 3508 #endif 3509 3510 Retry: 3511 PROC_LOCK(p); 3512 lmemlim = lim_cur(p, RLIMIT_MEMLOCK); 3513 stacklim = lim_cur(p, RLIMIT_STACK); 3514 vmemlim = lim_cur(p, RLIMIT_VMEM); 3515 PROC_UNLOCK(p); 3516 3517 vm_map_lock_read(map); 3518 3519 /* If addr is already in the entry range, no need to grow.*/ 3520 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 3521 vm_map_unlock_read(map); 3522 return (KERN_SUCCESS); 3523 } 3524 3525 next_entry = prev_entry->next; 3526 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 3527 /* 3528 * This entry does not grow upwards. 
		 * Since the address lies beyond this entry, the next entry
		 * (if one exists) has to be a downward growable entry.  The
		 * entry list header is never a growable entry, so it
		 * suffices to check the flags.
		 */
		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
			vm_map_unlock_read(map);
			return (KERN_SUCCESS);
		}
		stack_entry = next_entry;
	} else {
		/*
		 * This entry grows upward.  If the next entry does not at
		 * least grow downwards, this is the entry we need to grow.
		 * Otherwise we have two possible choices and we have to
		 * select one.
		 */
		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
			/*
			 * Both neighbors can grow: grow the entry closest
			 * to the address to minimize the amount of growth.
			 */
			if (addr - prev_entry->end <= next_entry->start - addr)
				stack_entry = prev_entry;
			else
				stack_entry = next_entry;
		} else
			stack_entry = prev_entry;
	}

	if (stack_entry == next_entry) {
		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
		KASSERT(addr < stack_entry->start, ("foo"));
		end = (prev_entry != &map->header) ? prev_entry->end :
		    stack_entry->start - stack_entry->avail_ssize;
		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
		max_grow = stack_entry->start - end;
	} else {
		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
		KASSERT(addr >= stack_entry->end, ("foo"));
		end = (next_entry != &map->header) ? next_entry->start :
		    stack_entry->end + stack_entry->avail_ssize;
		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
		max_grow = end - stack_entry->end;
	}

	if (grow_amount > stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * If there is no longer enough space between the entries, it is
	 * no go; adjust the available space.  Note: this should only
	 * happen if the user has mapped into the stack area after the
	 * stack was created, and is probably an error.
	 *
	 * This also effectively destroys any guard page the user might have
	 * intended by limiting the stack size.
	 */
	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
		if (vm_map_lock_upgrade(map))
			goto Retry;

		stack_entry->avail_ssize = max_grow;

		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;

	/*
	 * If this is the main process stack, see if we're over the stack
	 * limit.
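	 *
	 * (Worked example, assuming 4 KB pages and RLIMIT_STACK = 8 MB:
	 * at vm_ssize = 2000 pages, a 16-page grow request passes, since
	 * ctob(2000) + ptoa(16) = 8257536 <= 8388608; the same request at
	 * vm_ssize = 2040 pages fails with KERN_NO_SPACE.)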
3603 */ 3604 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3605 vm_map_unlock_read(map); 3606 return (KERN_NO_SPACE); 3607 } 3608 #ifdef RACCT 3609 PROC_LOCK(p); 3610 if (is_procstack && 3611 racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) { 3612 PROC_UNLOCK(p); 3613 vm_map_unlock_read(map); 3614 return (KERN_NO_SPACE); 3615 } 3616 PROC_UNLOCK(p); 3617 #endif 3618 3619 /* Round up the grow amount modulo sgrowsiz */ 3620 growsize = sgrowsiz; 3621 grow_amount = roundup(grow_amount, growsize); 3622 if (grow_amount > stack_entry->avail_ssize) 3623 grow_amount = stack_entry->avail_ssize; 3624 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3625 grow_amount = trunc_page((vm_size_t)stacklim) - 3626 ctob(vm->vm_ssize); 3627 } 3628 #ifdef notyet 3629 PROC_LOCK(p); 3630 limit = racct_get_available(p, RACCT_STACK); 3631 PROC_UNLOCK(p); 3632 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 3633 grow_amount = limit - ctob(vm->vm_ssize); 3634 #endif 3635 if (!old_mlock && map->flags & MAP_WIREFUTURE) { 3636 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 3637 vm_map_unlock_read(map); 3638 rv = KERN_NO_SPACE; 3639 goto out; 3640 } 3641 #ifdef RACCT 3642 PROC_LOCK(p); 3643 if (racct_set(p, RACCT_MEMLOCK, 3644 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 3645 PROC_UNLOCK(p); 3646 vm_map_unlock_read(map); 3647 rv = KERN_NO_SPACE; 3648 goto out; 3649 } 3650 PROC_UNLOCK(p); 3651 #endif 3652 } 3653 /* If we would blow our VMEM resource limit, no go */ 3654 if (map->size + grow_amount > vmemlim) { 3655 vm_map_unlock_read(map); 3656 rv = KERN_NO_SPACE; 3657 goto out; 3658 } 3659 #ifdef RACCT 3660 PROC_LOCK(p); 3661 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 3662 PROC_UNLOCK(p); 3663 vm_map_unlock_read(map); 3664 rv = KERN_NO_SPACE; 3665 goto out; 3666 } 3667 PROC_UNLOCK(p); 3668 #endif 3669 3670 if (vm_map_lock_upgrade(map)) 3671 goto Retry; 3672 3673 if (stack_entry == next_entry) { 3674 /* 3675 * Growing downward. 3676 */ 3677 /* Get the preliminary new entry start value */ 3678 addr = stack_entry->start - grow_amount; 3679 3680 /* 3681 * If this puts us into the previous entry, cut back our 3682 * growth to the available space. Also, see the note above. 3683 */ 3684 if (addr < end) { 3685 stack_entry->avail_ssize = max_grow; 3686 addr = end; 3687 if (stack_guard_page) 3688 addr += PAGE_SIZE; 3689 } 3690 3691 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 3692 next_entry->protection, next_entry->max_protection, 3693 MAP_STACK_GROWS_DOWN); 3694 3695 /* Adjust the available stack space by the amount we grew. */ 3696 if (rv == KERN_SUCCESS) { 3697 new_entry = prev_entry->next; 3698 KASSERT(new_entry == stack_entry->prev, ("foo")); 3699 KASSERT(new_entry->end == stack_entry->start, ("foo")); 3700 KASSERT(new_entry->start == addr, ("foo")); 3701 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 3702 0, ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3703 grow_amount = new_entry->end - new_entry->start; 3704 new_entry->avail_ssize = stack_entry->avail_ssize - 3705 grow_amount; 3706 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 3707 } 3708 } else { 3709 /* 3710 * Growing upward. 3711 */ 3712 addr = stack_entry->end + grow_amount; 3713 3714 /* 3715 * If this puts us into the next entry, cut back our growth 3716 * to the available space. Also, see the note above. 
		 */
		if (addr > end) {
			stack_entry->avail_ssize = end - stack_entry->end;
			addr = end;
			if (stack_guard_page)
				addr -= PAGE_SIZE;
		}

		grow_amount = addr - stack_entry->end;
		cred = stack_entry->cred;
		if (cred == NULL && stack_entry->object.vm_object != NULL)
			cred = stack_entry->object.vm_object->cred;
		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
			rv = KERN_NO_SPACE;
		/* Grow the underlying object if applicable. */
		else if (stack_entry->object.vm_object == NULL ||
		    vm_object_coalesce(stack_entry->object.vm_object,
		    stack_entry->offset,
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    (vm_size_t)grow_amount, cred != NULL)) {
			map->size += (addr - stack_entry->end);
			/* Update the current entry. */
			stack_entry->end = addr;
			stack_entry->avail_ssize -= grow_amount;
			vm_map_entry_resize_free(map, stack_entry);
			rv = KERN_SUCCESS;
		} else
			rv = KERN_FAILURE;
	}

	if (rv == KERN_SUCCESS && is_procstack)
		vm->vm_ssize += btoc(grow_amount);

	vm_map_unlock(map);

	/*
	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
	 */
	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
		vm_map_wire(map,
		    (stack_entry == next_entry) ? addr : addr - grow_amount,
		    (stack_entry == next_entry) ? stack_entry->start : addr,
		    (p->p_flag & P_SYSTEM)
		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
	}

out:
#ifdef RACCT
	if (rv != KERN_SUCCESS) {
		PROC_LOCK(p);
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		if (!old_mlock) {
			error = racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)));
			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
		PROC_UNLOCK(p);
	}
#endif

	return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is empty.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
	    ("vmspace_exec recursed"));
	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
	if (newvmspace == NULL)
		return (ENOMEM);
	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let the
	 * other processes that are still using the vmspace finally run
	 * it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	curthread->td_pflags |= TDP_EXECVMSPC;
	return (0);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
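 *
 * (Illustrative flow, a sketch of the caller's view rather than code
 * from the rfork path:
 *
 *	error = vmspace_unshare(p);
 *	if (error == ENOMEM)
 *		goto fail;	// fork or swap reservation failed
 *
 * where "fail" is a hypothetical label in the caller.)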
/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
int
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_ooffset_t fork_charge;

	if (oldvmspace->vm_refcnt == 1)
		return (0);
	fork_charge = 0;
	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
	if (newvmspace == NULL)
		return (ENOMEM);
	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
		vmspace_free(newvmspace);
		return (ENOMEM);
	}
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	vmspace_free(oldvmspace);
	return (0);
}
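
/*
 * Illustrative sketch (not compiled; #if 0): per the comment above,
 * the rfork(2) path only needs vmspace_unshare() when neither RFPROC
 * (new process) nor RFMEM (shared memory) was requested, i.e. the
 * caller keeps running but wants a private copy-on-write address
 * space.  "flags" and "error" are placeholder names.
 */
#if 0
	if ((flags & (RFMEM | RFPROC)) == 0) {
		error = vmspace_unshare(p);
		if (error != 0)
			return (error);
	}
#endif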
/*
 * vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;
	vm_object_t eobject;
	vm_size_t size;
	struct ucred *cred;

RetryLookup:;

	vm_map_lock_read(map);

	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}

	entry = *out_entry;

	/*
	 * Handle submaps.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}
	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE)) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}
	if ((fault_typea & VM_PROT_COPY) != 0 &&
	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
	    (entry->eflags & MAP_ENTRY_COW) == 0) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;
	size = entry->end - entry->start;
	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if ((fault_type & VM_PROT_WRITE) != 0 ||
		    (fault_typea & VM_PROT_COPY) != 0) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			if (entry->cred == NULL) {
				/*
				 * The debugger owner is charged for
				 * the memory.
				 */
				cred = curthread->td_ucred;
				crhold(cred);
				if (!swap_reserve_by_cred(size, cred)) {
					crfree(cred);
					vm_map_unlock(map);
					return (KERN_RESOURCE_SHORTAGE);
				}
				entry->cred = cred;
			}
			vm_object_shadow(&entry->object.vm_object,
			    &entry->offset, size);
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			eobject = entry->object.vm_object;
			if (eobject->cred != NULL) {
				/*
				 * The object was not shadowed.
				 */
				swap_release_by_cred(size, entry->cred);
				crfree(entry->cred);
				entry->cred = NULL;
			} else if (entry->cred != NULL) {
				VM_OBJECT_WLOCK(eobject);
				eobject->cred = entry->cred;
				eobject->charge = size;
				VM_OBJECT_WUNLOCK(eobject);
				entry->cred = NULL;
			}

			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(size));
		entry->offset = 0;
		if (entry->cred != NULL) {
			VM_OBJECT_WLOCK(entry->object.vm_object);
			entry->object.vm_object->cred = entry->cred;
			entry->object.vm_object->charge = size;
			VM_OBJECT_WUNLOCK(entry->object.vm_object);
			entry->cred = NULL;
		}
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}
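
/*
 * Illustrative sketch (not compiled; #if 0): the contract above means
 * a fault handler brackets its work between vm_map_lookup() and
 * vm_map_lookup_done(), and must pass the possibly-updated map pointer
 * (e.g. after a submap traversal) to the latter.  All local names are
 * hypothetical.
 */
#if 0
	vm_map_t map = &curproc->p_vmspace->vm_map;
	vm_offset_t vaddr = trunc_page(fault_addr);	/* faulting address */
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... resolve the page from (object, pindex) under "prot" ... */
	vm_map_lookup_done(map, entry);		/* drops the read lock */
#endif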
/*
 * vm_map_lookup_locked:
 *
 *	Lookup the faulting address.  A version of vm_map_lookup that returns
 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
		     vm_offset_t vaddr,
		     vm_prot_t fault_typea,
		     vm_map_entry_t *out_entry,	/* OUT */
		     vm_object_t *object,	/* OUT */
		     vm_pindex_t *pindex,	/* OUT */
		     vm_prot_t *out_prot,	/* OUT */
		     boolean_t *wired)		/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry))
		return (KERN_INVALID_ADDRESS);

	entry = *out_entry;

	/*
	 * Fail if the entry refers to a submap.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
		return (KERN_FAILURE);

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	if ((fault_type & prot) != fault_type)
		return (KERN_PROTECTION_FAILURE);
	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE))
		return (KERN_PROTECTION_FAILURE);

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * Fail if the entry was copy-on-write for a write fault.
		 */
		if (fault_type & VM_PROT_WRITE)
			return (KERN_FAILURE);
		/*
		 * We're attempting to read a copy-on-write page --
		 * don't allow writes.
		 */
		prot &= ~VM_PROT_WRITE;
	}

	/*
	 * Fail if an object should be created.
	 */
	if (entry->object.vm_object == NULL && !map->system_map)
		return (KERN_FAILURE);

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}

/*
 * vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
	/*
	 * Unlock the main-level map.
	 */
	vm_map_unlock_read(map);
}
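
/*
 * Illustrative sketch (not compiled; #if 0): vm_map_lookup_locked()
 * suits a caller that already holds the map read lock and must not
 * sleep; it returns KERN_FAILURE in the cases where vm_map_lookup()
 * would have upgraded the lock or allocated memory.  Local names are
 * hypothetical, as in the sketch after vm_map_lookup() above.
 */
#if 0
	vm_map_lock_read(map);			/* the caller owns the lock */
	rv = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &entry,
	    &object, &pindex, &prot, &wired);
	if (rv == KERN_SUCCESS) {
		/* ... use (object, pindex) without sleeping ... */
	}
	vm_map_unlock_read(map);		/* and releases it itself */
#endif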
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

static void
vm_map_print(vm_map_t map)
{
	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
		    (void *)entry, (void *)entry->start, (void *)entry->end);
		{
			static char *inheritance_name[4] =
			{"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			db_printf(", share=%p, offset=0x%jx\n",
			    (void *)entry->object.sub_map,
			    (uintmax_t)entry->offset);
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.sub_map !=
				entry->object.sub_map)) {
				db_indent += 2;
				vm_map_print((vm_map_t)entry->object.sub_map);
				db_indent -= 2;
			}
		} else {
			if (entry->cred != NULL)
				db_printf(", ruid %d", entry->cred->cr_ruid);
			db_printf(", object=%p, offset=0x%jx",
			    (void *)entry->object.vm_object,
			    (uintmax_t)entry->offset);
			if (entry->object.vm_object && entry->object.vm_object->cred)
				db_printf(", obj ruid %d charge %jx",
				    entry->object.vm_object->cred->cr_ruid,
				    (uintmax_t)entry->object.vm_object->charge);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");

			if ((entry->prev == &map->header) ||
			    (entry->prev->object.vm_object !=
				entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    0, 0, (char *)0);
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
}

DB_SHOW_COMMAND(map, map)
{

	if (!have_addr) {
		db_printf("usage: show map <addr>\n");
		return;
	}
	vm_map_print((vm_map_t)addr);
}

DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}

#endif /* DDB */
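
/*
 * Example DDB session (hypothetical addresses; output follows the
 * format strings in vm_map_print() above):
 *
 *	db> show procvm
 *	p = 0x..., vmspace = 0x..., map = 0x..., pmap = 0x...
 *	Task map 0x...: pmap=0x..., nentries=24, version=9
 *	  map entry 0x...: start=0x..., end=0x...
 *	   prot=5/7/copy, object=0x..., offset=0x0, copy (done)
 *	  ...
 *	db> show map <map address printed above>
 */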