/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a self-adjusting binary search tree of these
 * entries is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int size, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
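 *
 *	(Despite the wording above, the macro below does not assert; it
 *	clips the given start and end addresses into the map's valid range.)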
 */
#define	VM_MAP_RANGE_CHECK(map, start, end) \
	{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
	}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static void
vmspace_zfini(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;
	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	return (0);
}

static void
vm_map_zfini(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	mtx_destroy(&map->system_mtx);
	sx_destroy(&map->lock);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	map->nentries = 0;
	map->size = 0;
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
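 *
 * If the vmspace is recycled from the UMA zone, its pmap may already be
 * initialized; pmap_pinit() is only attempted when vm_map.pmap is NULL,
 * and a pmap_pinit() failure makes vmspace_alloc() return NULL.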
 */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

void
vm_init2(void)
{
	uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
	    maxproc * 2 + maxfiles);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
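	 *
	 * The reference is dropped with the compare-and-swap loop below;
	 * the process temporarily switches to vmspace0 so that a racing
	 * holder of a transient reference can free the vmspace safely.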
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
	vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
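 *
 *	The map_sleep_mtx is taken before the map lock is dropped, so a
 *	vm_map_wakeup() cannot be lost between the unlock and the msleep().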
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
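 *
 *	System map entries come from the preallocated kmapentzone and are
 *	allocated M_NOWAIT; exhausting that reserve is treated as a panic.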
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
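	 *
	 * Each spine node has its spine link redirected back at the
	 * subtree assembled so far before its max_free is recomputed,
	 * preserving the bottom-up invariant.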
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ?
	    map->max_offset : entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;
	boolean_t charge_prev_obj;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
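	 * (vm_map_lookup_entry() returns TRUE only when the address lies
	 * inside an existing entry, i.e. the proposed range would overlap.)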
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	charge_prev_obj = FALSE;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
		    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	KASSERT((object != kmem_object && object != kernel_object) ||
	    ((object == kmem_object || object == kernel_object) &&
	    !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
	    ("kmem or kernel object and cow"));
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->cred == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
		crhold(cred);
		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
			charge_prev_obj = TRUE;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
	    (prev_entry->eflags == protoeflags) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0) &&
	    (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    (prev_entry->object.vm_object->cred == cred))) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
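		 * Extending the existing entry avoids allocating a new
		 * map entry for this range.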
		 */
		if ((prev_entry->inheritance == inheritance) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			if (cred != NULL)
				crfree(cred);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			crfree(cred);
			cred = NULL;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = OFF_TO_IDX(offset);

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * It may be possible to merge the new entry with the next and/or
	 * previous entries.  However, due to MAP_STACK_* being a hack, a
	 * panic can result from merging such entries.
	 */
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
		vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
		    object, OFF_TO_IDX(offset), end - start,
		    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
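 *
 *	For example, an entry ending at 0x4000 with adj_free 0x2000 can
 *	satisfy any request of length <= 0x2000 by returning address 0x4000.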
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	(void) vm_map_delete(map, start, end);
	result = vm_map_insert(map, object, offset, start, end, prot,
	    max, cow);
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
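 *
 *	For VMFS_ALIGNED_SPACE requests, the search is retried when the
 *	alignment adjustment causes vm_map_insert() to fail with
 *	KERN_NO_SPACE.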
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, int find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result;

	start = *addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr)) {
				vm_map_unlock(map);
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_ALIGNED_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
#ifdef VMFS_TLB_ALIGNED_SPACE
			case VMFS_TLB_ALIGNED_SPACE:
				pmap_align_tlb(addr);
				break;
#endif
			default:
				break;
			}

			start = *addr;
		}
		result = vm_map_insert(map, object, offset, start, start +
		    length, prot, max, cow);
	} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE
#ifdef VMFS_TLB_ALIGNED_SPACE
	    || find_space == VMFS_TLB_ALIGNED_SPACE
#endif
	    ));
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count) &&
		     (prev->cred == entry->cred)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 *
			 * Since we count the number of virtual page
			 * mappings in object->un_pager.vnp.writemappings,
			 * the writemappings value should not be adjusted
			 * when the entry is disposed of.
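			 * The surviving, extended entry covers the same
			 * range of the same object, so the count stays
			 * correct.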
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->cred != NULL)
				crfree(prev->cred);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->cred == entry->cred)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->cred != NULL)
				crfree(next->cred);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * The maximum number of pages to map
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload read-only mappings for the specified object's resident pages
 *	into the target map.  If "flags" is MAP_PREFAULT_PARTIAL, then only
 *	the resident pages within the address range [addr, addr + ulmin(size,
 *	ptoa(MAX_INIT_PT))) are mapped.  Otherwise, all resident pages within
 *	the specified address range are mapped.  This eliminates many soft
 *	faults on process startup and immediately after an mmap(2).  Because
 *	these are speculative mappings, cached pages are not reactivated and
 *	mapped.
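 *
 *	Device and sg objects are handed to pmap_object_init_pt() under the
 *	exclusive object lock instead of being mapped page by page here.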
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t start;
	vm_page_t p, p_start;
	vm_pindex_t psize, tmpidx;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
		return;
	VM_OBJECT_RLOCK(object);
	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
		VM_OBJECT_RUNLOCK(object);
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
			pmap_object_init_pt(map->pmap, addr, object, pindex,
			    size);
			VM_OBJECT_WUNLOCK(object);
			return;
		}
		VM_OBJECT_LOCK_DOWNGRADE(object);
	}

	psize = atop(size);
	if (psize > MAX_INIT_PT && (flags & MAP_PREFAULT_PARTIAL) != 0)
		psize = MAX_INIT_PT;
	if (psize + pindex > object->size) {
		if (object->size < pindex) {
			VM_OBJECT_RUNLOCK(object);
			return;
		}
		psize = object->size - pindex;
	}

	start = 0;
	p_start = NULL;

	p = vm_page_find_least(object, pindex);
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * don't allow an madvise to blow away our really
		 * free pages allocating pv entries.
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			psize = tmpidx;
			break;
		}
		if (p->valid == VM_PAGE_BITS_ALL) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
	VM_OBJECT_RUNLOCK(object);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current, entry;
	vm_object_t obj;
	struct ucred *cred;
	vm_prot_t old_prot;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Do an accounting pass for private read-only mappings that
	 * now will do cow due to allowed write (e.g.
	 * debugger sets breakpoint on text segment).
	 */
	for (current = entry; (current != &map->header) &&
	    (current->start < end); current = current->next) {

		vm_map_clip_end(map, current, end);

		if (set_max ||
		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
		    ENTRY_CHARGED(current)) {
			continue;
		}

		cred = curthread->td_ucred;
		obj = current->object.vm_object;

		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
			if (!swap_reserve(current->end - current->start)) {
				vm_map_unlock(map);
				return (KERN_RESOURCE_SHORTAGE);
			}
			crhold(cred);
			current->cred = cred;
			continue;
		}

		VM_OBJECT_WLOCK(obj);
		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
			VM_OBJECT_WUNLOCK(obj);
			continue;
		}

		/*
		 * Charge for the whole object allocation now, since
		 * we cannot distinguish between non-charged and
		 * charged clipped mapping of the same object later.
		 */
		KASSERT(obj->charge == 0,
		    ("vm_map_protect: object %p overcharged\n", obj));
		if (!swap_reserve(ptoa(obj->size))) {
			VM_OBJECT_WUNLOCK(obj);
			vm_map_unlock(map);
			return (KERN_RESOURCE_SHORTAGE);
		}

		crhold(cred);
		obj->cred = cred;
		obj->charge = ptoa(obj->size);
		VM_OBJECT_WUNLOCK(obj);
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		old_prot = current->protection;

		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
		    == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
		    (current->protection & VM_PROT_WRITE) != 0 &&
		    (old_prot & VM_PROT_WRITE) == 0) {
			vm_fault_copy_entry(map, map, current, current, NULL);
		}

		/*
		 * When restricting access, update the physical map.  Worry
		 * about copy-on-write here.
		 */
		if ((old_prot & ~current->protection) != 0) {
#define	MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
    VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
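	 *
	 * (MADV_NORMAL through MADV_CORE modify map entries under the
	 * exclusive lock; MADV_WILLNEED, MADV_DONTNEED and MADV_FREE act
	 * on the backing objects under the read lock.)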
2033 */ 2034 switch(behav) { 2035 case MADV_NORMAL: 2036 case MADV_SEQUENTIAL: 2037 case MADV_RANDOM: 2038 case MADV_NOSYNC: 2039 case MADV_AUTOSYNC: 2040 case MADV_NOCORE: 2041 case MADV_CORE: 2042 modify_map = 1; 2043 vm_map_lock(map); 2044 break; 2045 case MADV_WILLNEED: 2046 case MADV_DONTNEED: 2047 case MADV_FREE: 2048 vm_map_lock_read(map); 2049 break; 2050 default: 2051 return (KERN_INVALID_ARGUMENT); 2052 } 2053 2054 /* 2055 * Locate starting entry and clip if necessary. 2056 */ 2057 VM_MAP_RANGE_CHECK(map, start, end); 2058 2059 if (vm_map_lookup_entry(map, start, &entry)) { 2060 if (modify_map) 2061 vm_map_clip_start(map, entry, start); 2062 } else { 2063 entry = entry->next; 2064 } 2065 2066 if (modify_map) { 2067 /* 2068 * madvise behaviors that are implemented in the vm_map_entry. 2069 * 2070 * We clip the vm_map_entry so that behavioral changes are 2071 * limited to the specified address range. 2072 */ 2073 for (current = entry; 2074 (current != &map->header) && (current->start < end); 2075 current = current->next 2076 ) { 2077 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2078 continue; 2079 2080 vm_map_clip_end(map, current, end); 2081 2082 switch (behav) { 2083 case MADV_NORMAL: 2084 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2085 break; 2086 case MADV_SEQUENTIAL: 2087 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2088 break; 2089 case MADV_RANDOM: 2090 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2091 break; 2092 case MADV_NOSYNC: 2093 current->eflags |= MAP_ENTRY_NOSYNC; 2094 break; 2095 case MADV_AUTOSYNC: 2096 current->eflags &= ~MAP_ENTRY_NOSYNC; 2097 break; 2098 case MADV_NOCORE: 2099 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2100 break; 2101 case MADV_CORE: 2102 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2103 break; 2104 default: 2105 break; 2106 } 2107 vm_map_simplify_entry(map, current); 2108 } 2109 vm_map_unlock(map); 2110 } else { 2111 vm_pindex_t pstart, pend; 2112 2113 /* 2114 * madvise behaviors that are implemented in the underlying 2115 * vm_object. 2116 * 2117 * Since we don't clip the vm_map_entry, we have to clip 2118 * the vm_object pindex and count. 2119 */ 2120 for (current = entry; 2121 (current != &map->header) && (current->start < end); 2122 current = current->next 2123 ) { 2124 vm_offset_t useStart; 2125 2126 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2127 continue; 2128 2129 pstart = OFF_TO_IDX(current->offset); 2130 pend = pstart + atop(current->end - current->start); 2131 useStart = current->start; 2132 2133 if (current->start < start) { 2134 pstart += atop(start - current->start); 2135 useStart = start; 2136 } 2137 if (current->end > end) 2138 pend -= atop(current->end - end); 2139 2140 if (pstart >= pend) 2141 continue; 2142 2143 vm_object_madvise(current->object.vm_object, pstart, 2144 pend, behav); 2145 if (behav == MADV_WILLNEED) { 2146 vm_map_pmap_enter(map, 2147 useStart, 2148 current->protection, 2149 current->object.vm_object, 2150 pstart, 2151 ptoa(pend - pstart), 2152 MAP_PREFAULT_MADVISE 2153 ); 2154 } 2155 } 2156 vm_map_unlock_read(map); 2157 } 2158 return (0); 2159 } 2160 2161 2162 /* 2163 * vm_map_inherit: 2164 * 2165 * Sets the inheritance of the specified address 2166 * range in the target map. Inheritance 2167 * affects how the map will be shared with 2168 * child maps at the time of vmspace_fork. 
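 *
 * A minimal usage sketch (illustrative only; the range is assumed to
 * have been validated by the caller, e.g. the minherit(2) path):
 *
 *	rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 *
 * VM_INHERIT_SHARE keeps the range shared with the child,
 * VM_INHERIT_COPY gives the child a copy-on-write copy, and
 * VM_INHERIT_NONE leaves the range unmapped in the child.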
2169 */ 2170 int 2171 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2172 vm_inherit_t new_inheritance) 2173 { 2174 vm_map_entry_t entry; 2175 vm_map_entry_t temp_entry; 2176 2177 switch (new_inheritance) { 2178 case VM_INHERIT_NONE: 2179 case VM_INHERIT_COPY: 2180 case VM_INHERIT_SHARE: 2181 break; 2182 default: 2183 return (KERN_INVALID_ARGUMENT); 2184 } 2185 vm_map_lock(map); 2186 VM_MAP_RANGE_CHECK(map, start, end); 2187 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2188 entry = temp_entry; 2189 vm_map_clip_start(map, entry, start); 2190 } else 2191 entry = temp_entry->next; 2192 while ((entry != &map->header) && (entry->start < end)) { 2193 vm_map_clip_end(map, entry, end); 2194 entry->inheritance = new_inheritance; 2195 vm_map_simplify_entry(map, entry); 2196 entry = entry->next; 2197 } 2198 vm_map_unlock(map); 2199 return (KERN_SUCCESS); 2200 } 2201 2202 /* 2203 * vm_map_unwire: 2204 * 2205 * Implements both kernel and user unwiring. 2206 */ 2207 int 2208 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2209 int flags) 2210 { 2211 vm_map_entry_t entry, first_entry, tmp_entry; 2212 vm_offset_t saved_start; 2213 unsigned int last_timestamp; 2214 int rv; 2215 boolean_t need_wakeup, result, user_unwire; 2216 2217 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2218 vm_map_lock(map); 2219 VM_MAP_RANGE_CHECK(map, start, end); 2220 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2221 if (flags & VM_MAP_WIRE_HOLESOK) 2222 first_entry = first_entry->next; 2223 else { 2224 vm_map_unlock(map); 2225 return (KERN_INVALID_ADDRESS); 2226 } 2227 } 2228 last_timestamp = map->timestamp; 2229 entry = first_entry; 2230 while (entry != &map->header && entry->start < end) { 2231 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2232 /* 2233 * We have not yet clipped the entry. 2234 */ 2235 saved_start = (start >= entry->start) ? start : 2236 entry->start; 2237 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2238 if (vm_map_unlock_and_wait(map, 0)) { 2239 /* 2240 * Allow interruption of user unwiring? 2241 */ 2242 } 2243 vm_map_lock(map); 2244 if (last_timestamp+1 != map->timestamp) { 2245 /* 2246 * Look again for the entry because the map was 2247 * modified while it was unlocked. 2248 * Specifically, the entry may have been 2249 * clipped, merged, or deleted. 2250 */ 2251 if (!vm_map_lookup_entry(map, saved_start, 2252 &tmp_entry)) { 2253 if (flags & VM_MAP_WIRE_HOLESOK) 2254 tmp_entry = tmp_entry->next; 2255 else { 2256 if (saved_start == start) { 2257 /* 2258 * First_entry has been deleted. 2259 */ 2260 vm_map_unlock(map); 2261 return (KERN_INVALID_ADDRESS); 2262 } 2263 end = saved_start; 2264 rv = KERN_INVALID_ADDRESS; 2265 goto done; 2266 } 2267 } 2268 if (entry == first_entry) 2269 first_entry = tmp_entry; 2270 else 2271 first_entry = NULL; 2272 entry = tmp_entry; 2273 } 2274 last_timestamp = map->timestamp; 2275 continue; 2276 } 2277 vm_map_clip_start(map, entry, start); 2278 vm_map_clip_end(map, entry, end); 2279 /* 2280 * Mark the entry in case the map lock is released. (See 2281 * above.) 2282 */ 2283 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2284 /* 2285 * Check the map for holes in the specified region. 2286 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
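 * (A hole exists when this entry ends before the requested end and
 * either no entry follows or the next entry does not start exactly
 * at entry->end.)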
2287 */ 2288 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2289 (entry->end < end && (entry->next == &map->header || 2290 entry->next->start > entry->end))) { 2291 end = entry->end; 2292 rv = KERN_INVALID_ADDRESS; 2293 goto done; 2294 } 2295 /* 2296 * If system unwiring, require that the entry is system wired. 2297 */ 2298 if (!user_unwire && 2299 vm_map_entry_system_wired_count(entry) == 0) { 2300 end = entry->end; 2301 rv = KERN_INVALID_ARGUMENT; 2302 goto done; 2303 } 2304 entry = entry->next; 2305 } 2306 rv = KERN_SUCCESS; 2307 done: 2308 need_wakeup = FALSE; 2309 if (first_entry == NULL) { 2310 result = vm_map_lookup_entry(map, start, &first_entry); 2311 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2312 first_entry = first_entry->next; 2313 else 2314 KASSERT(result, ("vm_map_unwire: lookup failed")); 2315 } 2316 entry = first_entry; 2317 while (entry != &map->header && entry->start < end) { 2318 if (rv == KERN_SUCCESS && (!user_unwire || 2319 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2320 if (user_unwire) 2321 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2322 entry->wired_count--; 2323 if (entry->wired_count == 0) { 2324 /* 2325 * Retain the map lock. 2326 */ 2327 vm_fault_unwire(map, entry->start, entry->end, 2328 entry->object.vm_object != NULL && 2329 (entry->object.vm_object->flags & 2330 OBJ_FICTITIOUS) != 0); 2331 } 2332 } 2333 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2334 ("vm_map_unwire: in-transition flag missing")); 2335 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2336 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2337 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2338 need_wakeup = TRUE; 2339 } 2340 vm_map_simplify_entry(map, entry); 2341 entry = entry->next; 2342 } 2343 vm_map_unlock(map); 2344 if (need_wakeup) 2345 vm_map_wakeup(map); 2346 return (rv); 2347 } 2348 2349 /* 2350 * vm_map_wire: 2351 * 2352 * Implements both kernel and user wiring. 2353 */ 2354 int 2355 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2356 int flags) 2357 { 2358 vm_map_entry_t entry, first_entry, tmp_entry; 2359 vm_offset_t saved_end, saved_start; 2360 unsigned int last_timestamp; 2361 int rv; 2362 boolean_t fictitious, need_wakeup, result, user_wire; 2363 vm_prot_t prot; 2364 2365 prot = 0; 2366 if (flags & VM_MAP_WIRE_WRITE) 2367 prot |= VM_PROT_WRITE; 2368 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2369 vm_map_lock(map); 2370 VM_MAP_RANGE_CHECK(map, start, end); 2371 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2372 if (flags & VM_MAP_WIRE_HOLESOK) 2373 first_entry = first_entry->next; 2374 else { 2375 vm_map_unlock(map); 2376 return (KERN_INVALID_ADDRESS); 2377 } 2378 } 2379 last_timestamp = map->timestamp; 2380 entry = first_entry; 2381 while (entry != &map->header && entry->start < end) { 2382 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2383 /* 2384 * We have not yet clipped the entry. 2385 */ 2386 saved_start = (start >= entry->start) ? start : 2387 entry->start; 2388 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2389 if (vm_map_unlock_and_wait(map, 0)) { 2390 /* 2391 * Allow interruption of user wiring? 2392 */ 2393 } 2394 vm_map_lock(map); 2395 if (last_timestamp + 1 != map->timestamp) { 2396 /* 2397 * Look again for the entry because the map was 2398 * modified while it was unlocked. 2399 * Specifically, the entry may have been 2400 * clipped, merged, or deleted. 
2401 */ 2402 if (!vm_map_lookup_entry(map, saved_start, 2403 &tmp_entry)) { 2404 if (flags & VM_MAP_WIRE_HOLESOK) 2405 tmp_entry = tmp_entry->next; 2406 else { 2407 if (saved_start == start) { 2408 /* 2409 * first_entry has been deleted. 2410 */ 2411 vm_map_unlock(map); 2412 return (KERN_INVALID_ADDRESS); 2413 } 2414 end = saved_start; 2415 rv = KERN_INVALID_ADDRESS; 2416 goto done; 2417 } 2418 } 2419 if (entry == first_entry) 2420 first_entry = tmp_entry; 2421 else 2422 first_entry = NULL; 2423 entry = tmp_entry; 2424 } 2425 last_timestamp = map->timestamp; 2426 continue; 2427 } 2428 vm_map_clip_start(map, entry, start); 2429 vm_map_clip_end(map, entry, end); 2430 /* 2431 * Mark the entry in case the map lock is released. (See 2432 * above.) 2433 */ 2434 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2435 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2436 || (entry->protection & prot) != prot) { 2437 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2438 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2439 end = entry->end; 2440 rv = KERN_INVALID_ADDRESS; 2441 goto done; 2442 } 2443 goto next_entry; 2444 } 2445 if (entry->wired_count == 0) { 2446 entry->wired_count++; 2447 saved_start = entry->start; 2448 saved_end = entry->end; 2449 fictitious = entry->object.vm_object != NULL && 2450 (entry->object.vm_object->flags & 2451 OBJ_FICTITIOUS) != 0; 2452 /* 2453 * Release the map lock, relying on the in-transition 2454 * mark. Mark the map busy for fork. 2455 */ 2456 vm_map_busy(map); 2457 vm_map_unlock(map); 2458 rv = vm_fault_wire(map, saved_start, saved_end, 2459 fictitious); 2460 vm_map_lock(map); 2461 vm_map_unbusy(map); 2462 if (last_timestamp + 1 != map->timestamp) { 2463 /* 2464 * Look again for the entry because the map was 2465 * modified while it was unlocked. The entry 2466 * may have been clipped, but NOT merged or 2467 * deleted. 2468 */ 2469 result = vm_map_lookup_entry(map, saved_start, 2470 &tmp_entry); 2471 KASSERT(result, ("vm_map_wire: lookup failed")); 2472 if (entry == first_entry) 2473 first_entry = tmp_entry; 2474 else 2475 first_entry = NULL; 2476 entry = tmp_entry; 2477 while (entry->end < saved_end) { 2478 if (rv != KERN_SUCCESS) { 2479 KASSERT(entry->wired_count == 1, 2480 ("vm_map_wire: bad count")); 2481 entry->wired_count = -1; 2482 } 2483 entry = entry->next; 2484 } 2485 } 2486 last_timestamp = map->timestamp; 2487 if (rv != KERN_SUCCESS) { 2488 KASSERT(entry->wired_count == 1, 2489 ("vm_map_wire: bad count")); 2490 /* 2491 * Assign an out-of-range value to represent 2492 * the failure to wire this entry. 2493 */ 2494 entry->wired_count = -1; 2495 end = entry->end; 2496 goto done; 2497 } 2498 } else if (!user_wire || 2499 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2500 entry->wired_count++; 2501 } 2502 /* 2503 * Check the map for holes in the specified region. 2504 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
2505 */ 2506 next_entry: 2507 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2508 (entry->end < end && (entry->next == &map->header || 2509 entry->next->start > entry->end))) { 2510 end = entry->end; 2511 rv = KERN_INVALID_ADDRESS; 2512 goto done; 2513 } 2514 entry = entry->next; 2515 } 2516 rv = KERN_SUCCESS; 2517 done: 2518 need_wakeup = FALSE; 2519 if (first_entry == NULL) { 2520 result = vm_map_lookup_entry(map, start, &first_entry); 2521 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2522 first_entry = first_entry->next; 2523 else 2524 KASSERT(result, ("vm_map_wire: lookup failed")); 2525 } 2526 entry = first_entry; 2527 while (entry != &map->header && entry->start < end) { 2528 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2529 goto next_entry_done; 2530 if (rv == KERN_SUCCESS) { 2531 if (user_wire) 2532 entry->eflags |= MAP_ENTRY_USER_WIRED; 2533 } else if (entry->wired_count == -1) { 2534 /* 2535 * Wiring failed on this entry. Thus, unwiring is 2536 * unnecessary. 2537 */ 2538 entry->wired_count = 0; 2539 } else { 2540 if (!user_wire || 2541 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 2542 entry->wired_count--; 2543 if (entry->wired_count == 0) { 2544 /* 2545 * Retain the map lock. 2546 */ 2547 vm_fault_unwire(map, entry->start, entry->end, 2548 entry->object.vm_object != NULL && 2549 (entry->object.vm_object->flags & 2550 OBJ_FICTITIOUS) != 0); 2551 } 2552 } 2553 next_entry_done: 2554 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2555 ("vm_map_wire: in-transition flag missing")); 2556 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED); 2557 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2558 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2559 need_wakeup = TRUE; 2560 } 2561 vm_map_simplify_entry(map, entry); 2562 entry = entry->next; 2563 } 2564 vm_map_unlock(map); 2565 if (need_wakeup) 2566 vm_map_wakeup(map); 2567 return (rv); 2568 } 2569 2570 /* 2571 * vm_map_sync 2572 * 2573 * Push any dirty cached pages in the address range to their pager. 2574 * If syncio is TRUE, dirty pages are written synchronously. 2575 * If invalidate is TRUE, any cached pages are freed as well. 2576 * 2577 * If the size of the region from start to end is zero, we are 2578 * supposed to flush all modified pages within the region containing 2579 * start. Unfortunately, a region can be split or coalesced with 2580 * neighboring regions, making it difficult to determine what the 2581 * original region was. Therefore, we approximate this requirement by 2582 * flushing the current region containing start. 2583 * 2584 * Returns an error if any part of the specified range is not mapped. 2585 */ 2586 int 2587 vm_map_sync( 2588 vm_map_t map, 2589 vm_offset_t start, 2590 vm_offset_t end, 2591 boolean_t syncio, 2592 boolean_t invalidate) 2593 { 2594 vm_map_entry_t current; 2595 vm_map_entry_t entry; 2596 vm_size_t size; 2597 vm_object_t object; 2598 vm_ooffset_t offset; 2599 unsigned int last_timestamp; 2600 boolean_t failed; 2601 2602 vm_map_lock_read(map); 2603 VM_MAP_RANGE_CHECK(map, start, end); 2604 if (!vm_map_lookup_entry(map, start, &entry)) { 2605 vm_map_unlock_read(map); 2606 return (KERN_INVALID_ADDRESS); 2607 } else if (start == end) { 2608 start = entry->start; 2609 end = entry->end; 2610 } 2611 /* 2612 * Make a first pass to check for user-wired memory and holes. 
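 * An invalidating sync of user-wired memory is rejected with
 * KERN_INVALID_ARGUMENT, since invalidation would free pages the
 * caller has wired; an unmapped gap within the range yields
 * KERN_INVALID_ADDRESS.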
2613 */ 2614 for (current = entry; current != &map->header && current->start < end; 2615 current = current->next) { 2616 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2617 vm_map_unlock_read(map); 2618 return (KERN_INVALID_ARGUMENT); 2619 } 2620 if (end > current->end && 2621 (current->next == &map->header || 2622 current->end != current->next->start)) { 2623 vm_map_unlock_read(map); 2624 return (KERN_INVALID_ADDRESS); 2625 } 2626 } 2627 2628 if (invalidate) 2629 pmap_remove(map->pmap, start, end); 2630 failed = FALSE; 2631 2632 /* 2633 * Make a second pass, cleaning/uncaching pages from the indicated 2634 * objects as we go. 2635 */ 2636 for (current = entry; current != &map->header && current->start < end;) { 2637 offset = current->offset + (start - current->start); 2638 size = (end <= current->end ? end : current->end) - start; 2639 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2640 vm_map_t smap; 2641 vm_map_entry_t tentry; 2642 vm_size_t tsize; 2643 2644 smap = current->object.sub_map; 2645 vm_map_lock_read(smap); 2646 (void) vm_map_lookup_entry(smap, offset, &tentry); 2647 tsize = tentry->end - offset; 2648 if (tsize < size) 2649 size = tsize; 2650 object = tentry->object.vm_object; 2651 offset = tentry->offset + (offset - tentry->start); 2652 vm_map_unlock_read(smap); 2653 } else { 2654 object = current->object.vm_object; 2655 } 2656 vm_object_reference(object); 2657 last_timestamp = map->timestamp; 2658 vm_map_unlock_read(map); 2659 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2660 failed = TRUE; 2661 start += size; 2662 vm_object_deallocate(object); 2663 vm_map_lock_read(map); 2664 if (last_timestamp == map->timestamp || 2665 !vm_map_lookup_entry(map, start, &current)) 2666 current = current->next; 2667 } 2668 2669 vm_map_unlock_read(map); 2670 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2671 } 2672 2673 /* 2674 * vm_map_entry_unwire: [ internal use only ] 2675 * 2676 * Make the region specified by this entry pageable. 2677 * 2678 * The map in question should be locked. 2679 * [This is the reason for this routine's existence.] 2680 */ 2681 static void 2682 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2683 { 2684 vm_fault_unwire(map, entry->start, entry->end, 2685 entry->object.vm_object != NULL && 2686 (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0); 2687 entry->wired_count = 0; 2688 } 2689 2690 static void 2691 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2692 { 2693 2694 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2695 vm_object_deallocate(entry->object.vm_object); 2696 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 2697 } 2698 2699 /* 2700 * vm_map_entry_delete: [ internal use only ] 2701 * 2702 * Deallocate the given entry from the target map.
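 * In addition to unlinking the entry, this releases any swap
 * reservation charged to the entry's credential, may prune the
 * entry's page range from a backing object for which this was the
 * only mapping, and, for user maps, defers freeing the entry itself
 * until the map lock has been dropped.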
2703 */ 2704 static void 2705 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2706 { 2707 vm_object_t object; 2708 vm_pindex_t offidxstart, offidxend, count, size1; 2709 vm_ooffset_t size; 2710 2711 vm_map_entry_unlink(map, entry); 2712 object = entry->object.vm_object; 2713 size = entry->end - entry->start; 2714 map->size -= size; 2715 2716 if (entry->cred != NULL) { 2717 swap_release_by_cred(size, entry->cred); 2718 crfree(entry->cred); 2719 } 2720 2721 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2722 (object != NULL)) { 2723 KASSERT(entry->cred == NULL || object->cred == NULL || 2724 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 2725 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 2726 count = OFF_TO_IDX(size); 2727 offidxstart = OFF_TO_IDX(entry->offset); 2728 offidxend = offidxstart + count; 2729 VM_OBJECT_WLOCK(object); 2730 if (object->ref_count != 1 && 2731 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2732 object == kernel_object || object == kmem_object)) { 2733 vm_object_collapse(object); 2734 2735 /* 2736 * The option OBJPR_NOTMAPPED can be passed here 2737 * because vm_map_delete() already performed 2738 * pmap_remove() on the only mapping to this range 2739 * of pages. 2740 */ 2741 vm_object_page_remove(object, offidxstart, offidxend, 2742 OBJPR_NOTMAPPED); 2743 if (object->type == OBJT_SWAP) 2744 swap_pager_freespace(object, offidxstart, count); 2745 if (offidxend >= object->size && 2746 offidxstart < object->size) { 2747 size1 = object->size; 2748 object->size = offidxstart; 2749 if (object->cred != NULL) { 2750 size1 -= object->size; 2751 KASSERT(object->charge >= ptoa(size1), 2752 ("vm_map_entry_delete: object->charge < 0")); 2753 swap_release_by_cred(ptoa(size1), object->cred); 2754 object->charge -= ptoa(size1); 2755 } 2756 } 2757 } 2758 VM_OBJECT_WUNLOCK(object); 2759 } else 2760 entry->object.vm_object = NULL; 2761 if (map->system_map) 2762 vm_map_entry_deallocate(entry, TRUE); 2763 else { 2764 entry->next = curthread->td_map_def_user; 2765 curthread->td_map_def_user = entry; 2766 } 2767 } 2768 2769 /* 2770 * vm_map_delete: [ internal use only ] 2771 * 2772 * Deallocates the given address range from the target 2773 * map. 2774 */ 2775 int 2776 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2777 { 2778 vm_map_entry_t entry; 2779 vm_map_entry_t first_entry; 2780 2781 VM_MAP_ASSERT_LOCKED(map); 2782 2783 /* 2784 * Find the start of the region, and clip it 2785 */ 2786 if (!vm_map_lookup_entry(map, start, &first_entry)) 2787 entry = first_entry->next; 2788 else { 2789 entry = first_entry; 2790 vm_map_clip_start(map, entry, start); 2791 } 2792 2793 /* 2794 * Step through all entries in this region 2795 */ 2796 while ((entry != &map->header) && (entry->start < end)) { 2797 vm_map_entry_t next; 2798 2799 /* 2800 * Wait for wiring or unwiring of an entry to complete. 2801 * Also wait for any system wirings to disappear on 2802 * user maps. 
2803 */ 2804 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 2805 (vm_map_pmap(map) != kernel_pmap && 2806 vm_map_entry_system_wired_count(entry) != 0)) { 2807 unsigned int last_timestamp; 2808 vm_offset_t saved_start; 2809 vm_map_entry_t tmp_entry; 2810 2811 saved_start = entry->start; 2812 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2813 last_timestamp = map->timestamp; 2814 (void) vm_map_unlock_and_wait(map, 0); 2815 vm_map_lock(map); 2816 if (last_timestamp + 1 != map->timestamp) { 2817 /* 2818 * Look again for the entry because the map was 2819 * modified while it was unlocked. 2820 * Specifically, the entry may have been 2821 * clipped, merged, or deleted. 2822 */ 2823 if (!vm_map_lookup_entry(map, saved_start, 2824 &tmp_entry)) 2825 entry = tmp_entry->next; 2826 else { 2827 entry = tmp_entry; 2828 vm_map_clip_start(map, entry, 2829 saved_start); 2830 } 2831 } 2832 continue; 2833 } 2834 vm_map_clip_end(map, entry, end); 2835 2836 next = entry->next; 2837 2838 /* 2839 * Unwire before removing addresses from the pmap; otherwise, 2840 * unwiring will put the entries back in the pmap. 2841 */ 2842 if (entry->wired_count != 0) { 2843 vm_map_entry_unwire(map, entry); 2844 } 2845 2846 pmap_remove(map->pmap, entry->start, entry->end); 2847 2848 /* 2849 * Delete the entry only after removing all pmap 2850 * entries pointing to its pages. (Otherwise, its 2851 * page frames may be reallocated, and any modify bits 2852 * will be set in the wrong object!) 2853 */ 2854 vm_map_entry_delete(map, entry); 2855 entry = next; 2856 } 2857 return (KERN_SUCCESS); 2858 } 2859 2860 /* 2861 * vm_map_remove: 2862 * 2863 * Remove the given address range from the target map. 2864 * This is the exported form of vm_map_delete. 2865 */ 2866 int 2867 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2868 { 2869 int result; 2870 2871 vm_map_lock(map); 2872 VM_MAP_RANGE_CHECK(map, start, end); 2873 result = vm_map_delete(map, start, end); 2874 vm_map_unlock(map); 2875 return (result); 2876 } 2877 2878 /* 2879 * vm_map_check_protection: 2880 * 2881 * Assert that the target map allows the specified privilege on the 2882 * entire address region given. The entire region must be allocated. 2883 * 2884 * WARNING! This code does not and should not check whether the 2885 * contents of the region is accessible. For example a smaller file 2886 * might be mapped into a larger address space. 2887 * 2888 * NOTE! This code is also called by munmap(). 2889 * 2890 * The map must be locked. A read lock is sufficient. 2891 */ 2892 boolean_t 2893 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2894 vm_prot_t protection) 2895 { 2896 vm_map_entry_t entry; 2897 vm_map_entry_t tmp_entry; 2898 2899 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2900 return (FALSE); 2901 entry = tmp_entry; 2902 2903 while (start < end) { 2904 if (entry == &map->header) 2905 return (FALSE); 2906 /* 2907 * No holes allowed! 2908 */ 2909 if (start < entry->start) 2910 return (FALSE); 2911 /* 2912 * Check protection associated with entry. 2913 */ 2914 if ((entry->protection & protection) != protection) 2915 return (FALSE); 2916 /* go to next entry */ 2917 start = entry->end; 2918 entry = entry->next; 2919 } 2920 return (TRUE); 2921 } 2922 2923 /* 2924 * vm_map_copy_entry: 2925 * 2926 * Copies the contents of the source entry to the destination 2927 * entry. The entries *must* be aligned properly. 
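 *
 * For an unwired source entry the copy is made lazily: the object
 * reference is shared, both entries are marked MAP_ENTRY_COW |
 * MAP_ENTRY_NEEDS_COPY, and write access is removed from the pmap so
 * that the first write fault performs the actual copy. Wired entries
 * cannot take that fault later, so their pages are copied immediately
 * by vm_fault_copy_entry().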
2928 */ 2929 static void 2930 vm_map_copy_entry( 2931 vm_map_t src_map, 2932 vm_map_t dst_map, 2933 vm_map_entry_t src_entry, 2934 vm_map_entry_t dst_entry, 2935 vm_ooffset_t *fork_charge) 2936 { 2937 vm_object_t src_object; 2938 vm_map_entry_t fake_entry; 2939 vm_offset_t size; 2940 struct ucred *cred; 2941 int charged; 2942 2943 VM_MAP_ASSERT_LOCKED(dst_map); 2944 2945 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2946 return; 2947 2948 if (src_entry->wired_count == 0) { 2949 2950 /* 2951 * If the source entry is marked needs_copy, it is already 2952 * write-protected. 2953 */ 2954 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2955 pmap_protect(src_map->pmap, 2956 src_entry->start, 2957 src_entry->end, 2958 src_entry->protection & ~VM_PROT_WRITE); 2959 } 2960 2961 /* 2962 * Make a copy of the object. 2963 */ 2964 size = src_entry->end - src_entry->start; 2965 if ((src_object = src_entry->object.vm_object) != NULL) { 2966 VM_OBJECT_WLOCK(src_object); 2967 charged = ENTRY_CHARGED(src_entry); 2968 if ((src_object->handle == NULL) && 2969 (src_object->type == OBJT_DEFAULT || 2970 src_object->type == OBJT_SWAP)) { 2971 vm_object_collapse(src_object); 2972 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2973 vm_object_split(src_entry); 2974 src_object = src_entry->object.vm_object; 2975 } 2976 } 2977 vm_object_reference_locked(src_object); 2978 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2979 if (src_entry->cred != NULL && 2980 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 2981 KASSERT(src_object->cred == NULL, 2982 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 2983 src_object)); 2984 src_object->cred = src_entry->cred; 2985 src_object->charge = size; 2986 } 2987 VM_OBJECT_WUNLOCK(src_object); 2988 dst_entry->object.vm_object = src_object; 2989 if (charged) { 2990 cred = curthread->td_ucred; 2991 crhold(cred); 2992 dst_entry->cred = cred; 2993 *fork_charge += size; 2994 if (!(src_entry->eflags & 2995 MAP_ENTRY_NEEDS_COPY)) { 2996 crhold(cred); 2997 src_entry->cred = cred; 2998 *fork_charge += size; 2999 } 3000 } 3001 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 3002 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 3003 dst_entry->offset = src_entry->offset; 3004 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3005 /* 3006 * MAP_ENTRY_VN_WRITECNT cannot 3007 * indicate write reference from 3008 * src_entry, since the entry is 3009 * marked as needs copy. Allocate a 3010 * fake entry that is used to 3011 * decrement object->un_pager.vnp.writecount 3012 * at the appropriate time. Attach 3013 * fake_entry to the deferred list. 3014 */ 3015 fake_entry = vm_map_entry_create(dst_map); 3016 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3017 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3018 vm_object_reference(src_object); 3019 fake_entry->object.vm_object = src_object; 3020 fake_entry->start = src_entry->start; 3021 fake_entry->end = src_entry->end; 3022 fake_entry->next = curthread->td_map_def_user; 3023 curthread->td_map_def_user = fake_entry; 3024 } 3025 } else { 3026 dst_entry->object.vm_object = NULL; 3027 dst_entry->offset = 0; 3028 if (src_entry->cred != NULL) { 3029 dst_entry->cred = curthread->td_ucred; 3030 crhold(dst_entry->cred); 3031 *fork_charge += size; 3032 } 3033 } 3034 3035 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 3036 dst_entry->end - dst_entry->start, src_entry->start); 3037 } else { 3038 /* 3039 * Of course, wired down pages can't be set copy-on-write. 
3040 * Cause wired pages to be copied into the new map by 3041 * simulating faults (the new pages are pageable) 3042 */ 3043 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3044 fork_charge); 3045 } 3046 } 3047 3048 /* 3049 * vmspace_map_entry_forked: 3050 * Update the newly-forked vmspace each time a map entry is inherited 3051 * or copied. The values for vm_dsize and vm_tsize are approximate 3052 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3053 */ 3054 static void 3055 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3056 vm_map_entry_t entry) 3057 { 3058 vm_size_t entrysize; 3059 vm_offset_t newend; 3060 3061 entrysize = entry->end - entry->start; 3062 vm2->vm_map.size += entrysize; 3063 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3064 vm2->vm_ssize += btoc(entrysize); 3065 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3066 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3067 newend = MIN(entry->end, 3068 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3069 vm2->vm_dsize += btoc(newend - entry->start); 3070 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3071 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3072 newend = MIN(entry->end, 3073 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3074 vm2->vm_tsize += btoc(newend - entry->start); 3075 } 3076 } 3077 3078 /* 3079 * vmspace_fork: 3080 * Create a new process vmspace structure and vm_map 3081 * based on those of an existing process. The new map 3082 * is based on the old map, according to the inheritance 3083 * values on the regions in that map. 3084 * 3085 * XXX It might be worth coalescing the entries added to the new vmspace. 3086 * 3087 * The source map must not be locked. 3088 */ 3089 struct vmspace * 3090 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3091 { 3092 struct vmspace *vm2; 3093 vm_map_t new_map, old_map; 3094 vm_map_entry_t new_entry, old_entry; 3095 vm_object_t object; 3096 int locked; 3097 3098 old_map = &vm1->vm_map; 3099 /* Copy immutable fields of vm1 to vm2. */ 3100 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 3101 if (vm2 == NULL) 3102 return (NULL); 3103 vm2->vm_taddr = vm1->vm_taddr; 3104 vm2->vm_daddr = vm1->vm_daddr; 3105 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3106 vm_map_lock(old_map); 3107 if (old_map->busy) 3108 vm_map_wait_busy(old_map); 3109 new_map = &vm2->vm_map; 3110 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3111 KASSERT(locked, ("vmspace_fork: lock failed")); 3112 3113 old_entry = old_map->header.next; 3114 3115 while (old_entry != &old_map->header) { 3116 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3117 panic("vm_map_fork: encountered a submap"); 3118 3119 switch (old_entry->inheritance) { 3120 case VM_INHERIT_NONE: 3121 break; 3122 3123 case VM_INHERIT_SHARE: 3124 /* 3125 * Clone the entry, creating the shared object if necessary. 3126 */ 3127 object = old_entry->object.vm_object; 3128 if (object == NULL) { 3129 object = vm_object_allocate(OBJT_DEFAULT, 3130 atop(old_entry->end - old_entry->start)); 3131 old_entry->object.vm_object = object; 3132 old_entry->offset = 0; 3133 if (old_entry->cred != NULL) { 3134 object->cred = old_entry->cred; 3135 object->charge = old_entry->end - 3136 old_entry->start; 3137 old_entry->cred = NULL; 3138 } 3139 } 3140 3141 /* 3142 * Add the reference before calling vm_object_shadow 3143 * to insure that a shadow object is created. 
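 * (vm_object_shadow() declines to shadow an object that has only a
 * single reference; the reference taken here guarantees that a real
 * shadow object is created when the entry still needs a copy.)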
3144 */ 3145 vm_object_reference(object); 3146 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3147 vm_object_shadow(&old_entry->object.vm_object, 3148 &old_entry->offset, 3149 old_entry->end - old_entry->start); 3150 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3151 /* Transfer the second reference too. */ 3152 vm_object_reference( 3153 old_entry->object.vm_object); 3154 3155 /* 3156 * As in vm_map_simplify_entry(), the 3157 * vnode lock will not be acquired in 3158 * this call to vm_object_deallocate(). 3159 */ 3160 vm_object_deallocate(object); 3161 object = old_entry->object.vm_object; 3162 } 3163 VM_OBJECT_WLOCK(object); 3164 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3165 if (old_entry->cred != NULL) { 3166 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3167 object->cred = old_entry->cred; 3168 object->charge = old_entry->end - old_entry->start; 3169 old_entry->cred = NULL; 3170 } 3171 3172 /* 3173 * Assert the correct state of the vnode 3174 * v_writecount while the object is locked, to 3175 * not relock it later for the assertion 3176 * correctness. 3177 */ 3178 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3179 object->type == OBJT_VNODE) { 3180 KASSERT(((struct vnode *)object->handle)-> 3181 v_writecount > 0, 3182 ("vmspace_fork: v_writecount %p", object)); 3183 KASSERT(object->un_pager.vnp.writemappings > 0, 3184 ("vmspace_fork: vnp.writecount %p", 3185 object)); 3186 } 3187 VM_OBJECT_WUNLOCK(object); 3188 3189 /* 3190 * Clone the entry, referencing the shared object. 3191 */ 3192 new_entry = vm_map_entry_create(new_map); 3193 *new_entry = *old_entry; 3194 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3195 MAP_ENTRY_IN_TRANSITION); 3196 new_entry->wired_count = 0; 3197 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3198 vnode_pager_update_writecount(object, 3199 new_entry->start, new_entry->end); 3200 } 3201 3202 /* 3203 * Insert the entry into the new map -- we know we're 3204 * inserting at the end of the new map. 3205 */ 3206 vm_map_entry_link(new_map, new_map->header.prev, 3207 new_entry); 3208 vmspace_map_entry_forked(vm1, vm2, new_entry); 3209 3210 /* 3211 * Update the physical map 3212 */ 3213 pmap_copy(new_map->pmap, old_map->pmap, 3214 new_entry->start, 3215 (old_entry->end - old_entry->start), 3216 old_entry->start); 3217 break; 3218 3219 case VM_INHERIT_COPY: 3220 /* 3221 * Clone the entry and link into the map. 3222 */ 3223 new_entry = vm_map_entry_create(new_map); 3224 *new_entry = *old_entry; 3225 /* 3226 * Copied entry is COW over the old object. 3227 */ 3228 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3229 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3230 new_entry->wired_count = 0; 3231 new_entry->object.vm_object = NULL; 3232 new_entry->cred = NULL; 3233 vm_map_entry_link(new_map, new_map->header.prev, 3234 new_entry); 3235 vmspace_map_entry_forked(vm1, vm2, new_entry); 3236 vm_map_copy_entry(old_map, new_map, old_entry, 3237 new_entry, fork_charge); 3238 break; 3239 } 3240 old_entry = old_entry->next; 3241 } 3242 /* 3243 * Use inlined vm_map_unlock() to postpone handling the deferred 3244 * map entries, which cannot be done until both old_map and 3245 * new_map locks are released. 
3246 */ 3247 sx_xunlock(&old_map->lock); 3248 sx_xunlock(&new_map->lock); 3249 vm_map_process_deferred(); 3250 3251 return (vm2); 3252 } 3253 3254 int 3255 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3256 vm_prot_t prot, vm_prot_t max, int cow) 3257 { 3258 vm_map_entry_t new_entry, prev_entry; 3259 vm_offset_t bot, top; 3260 vm_size_t growsize, init_ssize; 3261 int orient, rv; 3262 rlim_t lmemlim, vmemlim; 3263 3264 /* 3265 * The stack orientation is piggybacked with the cow argument. 3266 * Extract it into orient and mask the cow argument so that we 3267 * don't pass it around further. 3268 * NOTE: We explicitly allow bi-directional stacks. 3269 */ 3270 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); 3271 cow &= ~orient; 3272 KASSERT(orient != 0, ("No stack grow direction")); 3273 3274 if (addrbos < vm_map_min(map) || 3275 addrbos > vm_map_max(map) || 3276 addrbos + max_ssize < addrbos) 3277 return (KERN_NO_SPACE); 3278 3279 growsize = sgrowsiz; 3280 init_ssize = (max_ssize < growsize) ? max_ssize : growsize; 3281 3282 PROC_LOCK(curproc); 3283 lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK); 3284 vmemlim = lim_cur(curproc, RLIMIT_VMEM); 3285 PROC_UNLOCK(curproc); 3286 3287 vm_map_lock(map); 3288 3289 /* If addr is already mapped, no go */ 3290 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 3291 vm_map_unlock(map); 3292 return (KERN_NO_SPACE); 3293 } 3294 3295 if (!old_mlock && map->flags & MAP_WIREFUTURE) { 3296 if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) { 3297 vm_map_unlock(map); 3298 return (KERN_NO_SPACE); 3299 } 3300 } 3301 3302 /* If we would blow our VMEM resource limit, no go */ 3303 if (map->size + init_ssize > vmemlim) { 3304 vm_map_unlock(map); 3305 return (KERN_NO_SPACE); 3306 } 3307 3308 /* 3309 * If we can't accomodate max_ssize in the current mapping, no go. 3310 * However, we need to be aware that subsequent user mappings might 3311 * map into the space we have reserved for stack, and currently this 3312 * space is not protected. 3313 * 3314 * Hopefully we will at least detect this condition when we try to 3315 * grow the stack. 3316 */ 3317 if ((prev_entry->next != &map->header) && 3318 (prev_entry->next->start < addrbos + max_ssize)) { 3319 vm_map_unlock(map); 3320 return (KERN_NO_SPACE); 3321 } 3322 3323 /* 3324 * We initially map a stack of only init_ssize. We will grow as 3325 * needed later. Depending on the orientation of the stack (i.e. 3326 * the grow direction) we either map at the top of the range, the 3327 * bottom of the range or in the middle. 3328 * 3329 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3330 * and cow to be 0. Possibly we should eliminate these as input 3331 * parameters, and just pass these values here in the insert call. 3332 */ 3333 if (orient == MAP_STACK_GROWS_DOWN) 3334 bot = addrbos + max_ssize - init_ssize; 3335 else if (orient == MAP_STACK_GROWS_UP) 3336 bot = addrbos; 3337 else 3338 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 3339 top = bot + init_ssize; 3340 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3341 3342 /* Now set the avail_ssize amount. 
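 * (avail_ssize records how much of the reserved max_ssize range
 * remains available for future growth, i.e. max_ssize - init_ssize
 * at this point.)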
*/ 3343 if (rv == KERN_SUCCESS) { 3344 if (prev_entry != &map->header) 3345 vm_map_clip_end(map, prev_entry, bot); 3346 new_entry = prev_entry->next; 3347 if (new_entry->end != top || new_entry->start != bot) 3348 panic("Bad entry start/end for new stack entry"); 3349 3350 new_entry->avail_ssize = max_ssize - init_ssize; 3351 if (orient & MAP_STACK_GROWS_DOWN) 3352 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 3353 if (orient & MAP_STACK_GROWS_UP) 3354 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 3355 } 3356 3357 vm_map_unlock(map); 3358 return (rv); 3359 } 3360 3361 static int stack_guard_page = 0; 3362 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page); 3363 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW, 3364 &stack_guard_page, 0, 3365 "Insert stack guard page ahead of the growable segments."); 3366 3367 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3368 * desired address is already mapped, or if we successfully grow 3369 * the stack. Also returns KERN_SUCCESS if addr is outside the 3370 * stack range (this is strange, but preserves compatibility with 3371 * the grow function in vm_machdep.c). 3372 */ 3373 int 3374 vm_map_growstack(struct proc *p, vm_offset_t addr) 3375 { 3376 vm_map_entry_t next_entry, prev_entry; 3377 vm_map_entry_t new_entry, stack_entry; 3378 struct vmspace *vm = p->p_vmspace; 3379 vm_map_t map = &vm->vm_map; 3380 vm_offset_t end; 3381 vm_size_t growsize; 3382 size_t grow_amount, max_grow; 3383 rlim_t lmemlim, stacklim, vmemlim; 3384 int is_procstack, rv; 3385 struct ucred *cred; 3386 #ifdef notyet 3387 uint64_t limit; 3388 #endif 3389 #ifdef RACCT 3390 int error; 3391 #endif 3392 3393 Retry: 3394 PROC_LOCK(p); 3395 lmemlim = lim_cur(p, RLIMIT_MEMLOCK); 3396 stacklim = lim_cur(p, RLIMIT_STACK); 3397 vmemlim = lim_cur(p, RLIMIT_VMEM); 3398 PROC_UNLOCK(p); 3399 3400 vm_map_lock_read(map); 3401 3402 /* If addr is already in the entry range, no need to grow.*/ 3403 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 3404 vm_map_unlock_read(map); 3405 return (KERN_SUCCESS); 3406 } 3407 3408 next_entry = prev_entry->next; 3409 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 3410 /* 3411 * This entry does not grow upwards. Since the address lies 3412 * beyond this entry, the next entry (if one exists) has to 3413 * be a downward growable entry. The entry list header is 3414 * never a growable entry, so it suffices to check the flags. 3415 */ 3416 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 3417 vm_map_unlock_read(map); 3418 return (KERN_SUCCESS); 3419 } 3420 stack_entry = next_entry; 3421 } else { 3422 /* 3423 * This entry grows upward. If the next entry does not at 3424 * least grow downwards, this is the entry we need to grow. 3425 * otherwise we have two possible choices and we have to 3426 * select one. 3427 */ 3428 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 3429 /* 3430 * We have two choices; grow the entry closest to 3431 * the address to minimize the amount of growth. 3432 */ 3433 if (addr - prev_entry->end <= next_entry->start - addr) 3434 stack_entry = prev_entry; 3435 else 3436 stack_entry = next_entry; 3437 } else 3438 stack_entry = prev_entry; 3439 } 3440 3441 if (stack_entry == next_entry) { 3442 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 3443 KASSERT(addr < stack_entry->start, ("foo")); 3444 end = (prev_entry != &map->header) ? 
prev_entry->end : 3445 stack_entry->start - stack_entry->avail_ssize; 3446 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 3447 max_grow = stack_entry->start - end; 3448 } else { 3449 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 3450 KASSERT(addr >= stack_entry->end, ("foo")); 3451 end = (next_entry != &map->header) ? next_entry->start : 3452 stack_entry->end + stack_entry->avail_ssize; 3453 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 3454 max_grow = end - stack_entry->end; 3455 } 3456 3457 if (grow_amount > stack_entry->avail_ssize) { 3458 vm_map_unlock_read(map); 3459 return (KERN_NO_SPACE); 3460 } 3461 3462 /* 3463 * If there is no longer enough space between the entries nogo, and 3464 * adjust the available space. Note: this should only happen if the 3465 * user has mapped into the stack area after the stack was created, 3466 * and is probably an error. 3467 * 3468 * This also effectively destroys any guard page the user might have 3469 * intended by limiting the stack size. 3470 */ 3471 if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) { 3472 if (vm_map_lock_upgrade(map)) 3473 goto Retry; 3474 3475 stack_entry->avail_ssize = max_grow; 3476 3477 vm_map_unlock(map); 3478 return (KERN_NO_SPACE); 3479 } 3480 3481 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 3482 3483 /* 3484 * If this is the main process stack, see if we're over the stack 3485 * limit. 3486 */ 3487 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3488 vm_map_unlock_read(map); 3489 return (KERN_NO_SPACE); 3490 } 3491 #ifdef RACCT 3492 PROC_LOCK(p); 3493 if (is_procstack && 3494 racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) { 3495 PROC_UNLOCK(p); 3496 vm_map_unlock_read(map); 3497 return (KERN_NO_SPACE); 3498 } 3499 PROC_UNLOCK(p); 3500 #endif 3501 3502 /* Round up the grow amount modulo sgrowsiz */ 3503 growsize = sgrowsiz; 3504 grow_amount = roundup(grow_amount, growsize); 3505 if (grow_amount > stack_entry->avail_ssize) 3506 grow_amount = stack_entry->avail_ssize; 3507 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3508 grow_amount = trunc_page((vm_size_t)stacklim) - 3509 ctob(vm->vm_ssize); 3510 } 3511 #ifdef notyet 3512 PROC_LOCK(p); 3513 limit = racct_get_available(p, RACCT_STACK); 3514 PROC_UNLOCK(p); 3515 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 3516 grow_amount = limit - ctob(vm->vm_ssize); 3517 #endif 3518 if (!old_mlock && map->flags & MAP_WIREFUTURE) { 3519 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 3520 vm_map_unlock_read(map); 3521 rv = KERN_NO_SPACE; 3522 goto out; 3523 } 3524 #ifdef RACCT 3525 PROC_LOCK(p); 3526 if (racct_set(p, RACCT_MEMLOCK, 3527 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 3528 PROC_UNLOCK(p); 3529 vm_map_unlock_read(map); 3530 rv = KERN_NO_SPACE; 3531 goto out; 3532 } 3533 PROC_UNLOCK(p); 3534 #endif 3535 } 3536 /* If we would blow our VMEM resource limit, no go */ 3537 if (map->size + grow_amount > vmemlim) { 3538 vm_map_unlock_read(map); 3539 rv = KERN_NO_SPACE; 3540 goto out; 3541 } 3542 #ifdef RACCT 3543 PROC_LOCK(p); 3544 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 3545 PROC_UNLOCK(p); 3546 vm_map_unlock_read(map); 3547 rv = KERN_NO_SPACE; 3548 goto out; 3549 } 3550 PROC_UNLOCK(p); 3551 #endif 3552 3553 if (vm_map_lock_upgrade(map)) 3554 goto Retry; 3555 3556 if (stack_entry == next_entry) { 3557 /* 3558 * Growing downward. 
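 * The new pages are inserted as a separate entry immediately below
 * stack_entry->start; MAP_ENTRY_GROWS_DOWN and the remaining
 * avail_ssize migrate to that new entry.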
3559 */ 3560 /* Get the preliminary new entry start value */ 3561 addr = stack_entry->start - grow_amount; 3562 3563 /* 3564 * If this puts us into the previous entry, cut back our 3565 * growth to the available space. Also, see the note above. 3566 */ 3567 if (addr < end) { 3568 stack_entry->avail_ssize = max_grow; 3569 addr = end; 3570 if (stack_guard_page) 3571 addr += PAGE_SIZE; 3572 } 3573 3574 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 3575 next_entry->protection, next_entry->max_protection, 0); 3576 3577 /* Adjust the available stack space by the amount we grew. */ 3578 if (rv == KERN_SUCCESS) { 3579 if (prev_entry != &map->header) 3580 vm_map_clip_end(map, prev_entry, addr); 3581 new_entry = prev_entry->next; 3582 KASSERT(new_entry == stack_entry->prev, ("foo")); 3583 KASSERT(new_entry->end == stack_entry->start, ("foo")); 3584 KASSERT(new_entry->start == addr, ("foo")); 3585 grow_amount = new_entry->end - new_entry->start; 3586 new_entry->avail_ssize = stack_entry->avail_ssize - 3587 grow_amount; 3588 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 3589 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 3590 } 3591 } else { 3592 /* 3593 * Growing upward. 3594 */ 3595 addr = stack_entry->end + grow_amount; 3596 3597 /* 3598 * If this puts us into the next entry, cut back our growth 3599 * to the available space. Also, see the note above. 3600 */ 3601 if (addr > end) { 3602 stack_entry->avail_ssize = end - stack_entry->end; 3603 addr = end; 3604 if (stack_guard_page) 3605 addr -= PAGE_SIZE; 3606 } 3607 3608 grow_amount = addr - stack_entry->end; 3609 cred = stack_entry->cred; 3610 if (cred == NULL && stack_entry->object.vm_object != NULL) 3611 cred = stack_entry->object.vm_object->cred; 3612 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 3613 rv = KERN_NO_SPACE; 3614 /* Grow the underlying object if applicable. */ 3615 else if (stack_entry->object.vm_object == NULL || 3616 vm_object_coalesce(stack_entry->object.vm_object, 3617 stack_entry->offset, 3618 (vm_size_t)(stack_entry->end - stack_entry->start), 3619 (vm_size_t)grow_amount, cred != NULL)) { 3620 map->size += (addr - stack_entry->end); 3621 /* Update the current entry. */ 3622 stack_entry->end = addr; 3623 stack_entry->avail_ssize -= grow_amount; 3624 vm_map_entry_resize_free(map, stack_entry); 3625 rv = KERN_SUCCESS; 3626 3627 if (next_entry != &map->header) 3628 vm_map_clip_start(map, next_entry, addr); 3629 } else 3630 rv = KERN_FAILURE; 3631 } 3632 3633 if (rv == KERN_SUCCESS && is_procstack) 3634 vm->vm_ssize += btoc(grow_amount); 3635 3636 vm_map_unlock(map); 3637 3638 /* 3639 * Heed the MAP_WIREFUTURE flag if it was set for this process. 3640 */ 3641 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 3642 vm_map_wire(map, 3643 (stack_entry == next_entry) ? addr : addr - grow_amount, 3644 (stack_entry == next_entry) ? stack_entry->start : addr, 3645 (p->p_flag & P_SYSTEM) 3646 ? 
VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 3647 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 3648 } 3649 3650 out: 3651 #ifdef RACCT 3652 if (rv != KERN_SUCCESS) { 3653 PROC_LOCK(p); 3654 error = racct_set(p, RACCT_VMEM, map->size); 3655 KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); 3656 if (!old_mlock) { 3657 error = racct_set(p, RACCT_MEMLOCK, 3658 ptoa(pmap_wired_count(map->pmap))); 3659 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed")); 3660 } 3661 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); 3662 KASSERT(error == 0, ("decreasing RACCT_STACK failed")); 3663 PROC_UNLOCK(p); 3664 } 3665 #endif 3666 3667 return (rv); 3668 } 3669 3670 /* 3671 * Unshare the specified VM space for exec. If other processes are 3672 * mapped to it, then create a new one. The new vmspace is null. 3673 */ 3674 int 3675 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 3676 { 3677 struct vmspace *oldvmspace = p->p_vmspace; 3678 struct vmspace *newvmspace; 3679 3680 newvmspace = vmspace_alloc(minuser, maxuser); 3681 if (newvmspace == NULL) 3682 return (ENOMEM); 3683 newvmspace->vm_swrss = oldvmspace->vm_swrss; 3684 /* 3685 * This code is written like this for prototype purposes. The 3686 * goal is to avoid running down the vmspace here, but let the 3687 * other process's that are still using the vmspace to finally 3688 * run it down. Even though there is little or no chance of blocking 3689 * here, it is a good idea to keep this form for future mods. 3690 */ 3691 PROC_VMSPACE_LOCK(p); 3692 p->p_vmspace = newvmspace; 3693 PROC_VMSPACE_UNLOCK(p); 3694 if (p == curthread->td_proc) 3695 pmap_activate(curthread); 3696 vmspace_free(oldvmspace); 3697 return (0); 3698 } 3699 3700 /* 3701 * Unshare the specified VM space for forcing COW. This 3702 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 3703 */ 3704 int 3705 vmspace_unshare(struct proc *p) 3706 { 3707 struct vmspace *oldvmspace = p->p_vmspace; 3708 struct vmspace *newvmspace; 3709 vm_ooffset_t fork_charge; 3710 3711 if (oldvmspace->vm_refcnt == 1) 3712 return (0); 3713 fork_charge = 0; 3714 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 3715 if (newvmspace == NULL) 3716 return (ENOMEM); 3717 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 3718 vmspace_free(newvmspace); 3719 return (ENOMEM); 3720 } 3721 PROC_VMSPACE_LOCK(p); 3722 p->p_vmspace = newvmspace; 3723 PROC_VMSPACE_UNLOCK(p); 3724 if (p == curthread->td_proc) 3725 pmap_activate(curthread); 3726 vmspace_free(oldvmspace); 3727 return (0); 3728 } 3729 3730 /* 3731 * vm_map_lookup: 3732 * 3733 * Finds the VM object, offset, and 3734 * protection for a given virtual address in the 3735 * specified map, assuming a page fault of the 3736 * type specified. 3737 * 3738 * Leaves the map in question locked for read; return 3739 * values are guaranteed until a vm_map_lookup_done 3740 * call is performed. Note that the map argument 3741 * is in/out; the returned map must be used in 3742 * the call to vm_map_lookup_done. 3743 * 3744 * A handle (out_entry) is returned for use in 3745 * vm_map_lookup_done, to make that fast. 3746 * 3747 * If a lookup is requested with "write protection" 3748 * specified, the map may be changed to perform virtual 3749 * copying operations, although the data referenced will 3750 * remain the same. 
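 *
 * A sketch of the usual calling pattern, much as the page fault
 * handler uses it (error handling omitted; all names are local to
 * the caller):
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		... resolve the page at (object, pindex) ...
 *		vm_map_lookup_done(map, entry);
 *	}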
3751 */ 3752 int 3753 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 3754 vm_offset_t vaddr, 3755 vm_prot_t fault_typea, 3756 vm_map_entry_t *out_entry, /* OUT */ 3757 vm_object_t *object, /* OUT */ 3758 vm_pindex_t *pindex, /* OUT */ 3759 vm_prot_t *out_prot, /* OUT */ 3760 boolean_t *wired) /* OUT */ 3761 { 3762 vm_map_entry_t entry; 3763 vm_map_t map = *var_map; 3764 vm_prot_t prot; 3765 vm_prot_t fault_type = fault_typea; 3766 vm_object_t eobject; 3767 vm_size_t size; 3768 struct ucred *cred; 3769 3770 RetryLookup:; 3771 3772 vm_map_lock_read(map); 3773 3774 /* 3775 * Lookup the faulting address. 3776 */ 3777 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 3778 vm_map_unlock_read(map); 3779 return (KERN_INVALID_ADDRESS); 3780 } 3781 3782 entry = *out_entry; 3783 3784 /* 3785 * Handle submaps. 3786 */ 3787 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3788 vm_map_t old_map = map; 3789 3790 *var_map = map = entry->object.sub_map; 3791 vm_map_unlock_read(old_map); 3792 goto RetryLookup; 3793 } 3794 3795 /* 3796 * Check whether this task is allowed to have this page. 3797 */ 3798 prot = entry->protection; 3799 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 3800 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 3801 vm_map_unlock_read(map); 3802 return (KERN_PROTECTION_FAILURE); 3803 } 3804 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3805 (entry->eflags & MAP_ENTRY_COW) && 3806 (fault_type & VM_PROT_WRITE)) { 3807 vm_map_unlock_read(map); 3808 return (KERN_PROTECTION_FAILURE); 3809 } 3810 if ((fault_typea & VM_PROT_COPY) != 0 && 3811 (entry->max_protection & VM_PROT_WRITE) == 0 && 3812 (entry->eflags & MAP_ENTRY_COW) == 0) { 3813 vm_map_unlock_read(map); 3814 return (KERN_PROTECTION_FAILURE); 3815 } 3816 3817 /* 3818 * If this page is not pageable, we have to get it for all possible 3819 * accesses. 3820 */ 3821 *wired = (entry->wired_count != 0); 3822 if (*wired) 3823 fault_type = entry->protection; 3824 size = entry->end - entry->start; 3825 /* 3826 * If the entry was copy-on-write, we either ... 3827 */ 3828 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3829 /* 3830 * If we want to write the page, we may as well handle that 3831 * now since we've got the map locked. 3832 * 3833 * If we don't need to write the page, we just demote the 3834 * permissions allowed. 3835 */ 3836 if ((fault_type & VM_PROT_WRITE) != 0 || 3837 (fault_typea & VM_PROT_COPY) != 0) { 3838 /* 3839 * Make a new object, and place it in the object 3840 * chain. Note that no new references have appeared 3841 * -- one just moved from the map to the new 3842 * object. 3843 */ 3844 if (vm_map_lock_upgrade(map)) 3845 goto RetryLookup; 3846 3847 if (entry->cred == NULL) { 3848 /* 3849 * The debugger owner is charged for 3850 * the memory. 3851 */ 3852 cred = curthread->td_ucred; 3853 crhold(cred); 3854 if (!swap_reserve_by_cred(size, cred)) { 3855 crfree(cred); 3856 vm_map_unlock(map); 3857 return (KERN_RESOURCE_SHORTAGE); 3858 } 3859 entry->cred = cred; 3860 } 3861 vm_object_shadow(&entry->object.vm_object, 3862 &entry->offset, size); 3863 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3864 eobject = entry->object.vm_object; 3865 if (eobject->cred != NULL) { 3866 /* 3867 * The object was not shadowed. 
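 * If eobject still carries a credential, vm_object_shadow() reused
 * the existing object instead of creating a new one, so the entry's
 * swap reservation is released below rather than being kept in
 * addition to the object's existing charge.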
3868 */ 3869 swap_release_by_cred(size, entry->cred); 3870 crfree(entry->cred); 3871 entry->cred = NULL; 3872 } else if (entry->cred != NULL) { 3873 VM_OBJECT_WLOCK(eobject); 3874 eobject->cred = entry->cred; 3875 eobject->charge = size; 3876 VM_OBJECT_WUNLOCK(eobject); 3877 entry->cred = NULL; 3878 } 3879 3880 vm_map_lock_downgrade(map); 3881 } else { 3882 /* 3883 * We're attempting to read a copy-on-write page -- 3884 * don't allow writes. 3885 */ 3886 prot &= ~VM_PROT_WRITE; 3887 } 3888 } 3889 3890 /* 3891 * Create an object if necessary. 3892 */ 3893 if (entry->object.vm_object == NULL && 3894 !map->system_map) { 3895 if (vm_map_lock_upgrade(map)) 3896 goto RetryLookup; 3897 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 3898 atop(size)); 3899 entry->offset = 0; 3900 if (entry->cred != NULL) { 3901 VM_OBJECT_WLOCK(entry->object.vm_object); 3902 entry->object.vm_object->cred = entry->cred; 3903 entry->object.vm_object->charge = size; 3904 VM_OBJECT_WUNLOCK(entry->object.vm_object); 3905 entry->cred = NULL; 3906 } 3907 vm_map_lock_downgrade(map); 3908 } 3909 3910 /* 3911 * Return the object/offset from this entry. If the entry was 3912 * copy-on-write or empty, it has been fixed up. 3913 */ 3914 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3915 *object = entry->object.vm_object; 3916 3917 *out_prot = prot; 3918 return (KERN_SUCCESS); 3919 } 3920 3921 /* 3922 * vm_map_lookup_locked: 3923 * 3924 * Lookup the faulting address. A version of vm_map_lookup that returns 3925 * KERN_FAILURE instead of blocking on map lock or memory allocation. 3926 */ 3927 int 3928 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 3929 vm_offset_t vaddr, 3930 vm_prot_t fault_typea, 3931 vm_map_entry_t *out_entry, /* OUT */ 3932 vm_object_t *object, /* OUT */ 3933 vm_pindex_t *pindex, /* OUT */ 3934 vm_prot_t *out_prot, /* OUT */ 3935 boolean_t *wired) /* OUT */ 3936 { 3937 vm_map_entry_t entry; 3938 vm_map_t map = *var_map; 3939 vm_prot_t prot; 3940 vm_prot_t fault_type = fault_typea; 3941 3942 /* 3943 * Lookup the faulting address. 3944 */ 3945 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 3946 return (KERN_INVALID_ADDRESS); 3947 3948 entry = *out_entry; 3949 3950 /* 3951 * Fail if the entry refers to a submap. 3952 */ 3953 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3954 return (KERN_FAILURE); 3955 3956 /* 3957 * Check whether this task is allowed to have this page. 3958 */ 3959 prot = entry->protection; 3960 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 3961 if ((fault_type & prot) != fault_type) 3962 return (KERN_PROTECTION_FAILURE); 3963 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3964 (entry->eflags & MAP_ENTRY_COW) && 3965 (fault_type & VM_PROT_WRITE)) 3966 return (KERN_PROTECTION_FAILURE); 3967 3968 /* 3969 * If this page is not pageable, we have to get it for all possible 3970 * accesses. 3971 */ 3972 *wired = (entry->wired_count != 0); 3973 if (*wired) 3974 fault_type = entry->protection; 3975 3976 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3977 /* 3978 * Fail if the entry was copy-on-write for a write fault. 3979 */ 3980 if (fault_type & VM_PROT_WRITE) 3981 return (KERN_FAILURE); 3982 /* 3983 * We're attempting to read a copy-on-write page -- 3984 * don't allow writes. 3985 */ 3986 prot &= ~VM_PROT_WRITE; 3987 } 3988 3989 /* 3990 * Fail if an object should be created. 3991 */ 3992 if (entry->object.vm_object == NULL && !map->system_map) 3993 return (KERN_FAILURE); 3994 3995 /* 3996 * Return the object/offset from this entry. 
If the entry was 3997 * copy-on-write or empty, it has been fixed up. 3998 */ 3999 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4000 *object = entry->object.vm_object; 4001 4002 *out_prot = prot; 4003 return (KERN_SUCCESS); 4004 } 4005 4006 /* 4007 * vm_map_lookup_done: 4008 * 4009 * Releases locks acquired by a vm_map_lookup 4010 * (according to the handle returned by that lookup). 4011 */ 4012 void 4013 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4014 { 4015 /* 4016 * Unlock the main-level map 4017 */ 4018 vm_map_unlock_read(map); 4019 } 4020 4021 #include "opt_ddb.h" 4022 #ifdef DDB 4023 #include <sys/kernel.h> 4024 4025 #include <ddb/ddb.h> 4026 4027 static void 4028 vm_map_print(vm_map_t map) 4029 { 4030 vm_map_entry_t entry; 4031 4032 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4033 (void *)map, 4034 (void *)map->pmap, map->nentries, map->timestamp); 4035 4036 db_indent += 2; 4037 for (entry = map->header.next; entry != &map->header; 4038 entry = entry->next) { 4039 db_iprintf("map entry %p: start=%p, end=%p\n", 4040 (void *)entry, (void *)entry->start, (void *)entry->end); 4041 { 4042 static char *inheritance_name[4] = 4043 {"share", "copy", "none", "donate_copy"}; 4044 4045 db_iprintf(" prot=%x/%x/%s", 4046 entry->protection, 4047 entry->max_protection, 4048 inheritance_name[(int)(unsigned char)entry->inheritance]); 4049 if (entry->wired_count != 0) 4050 db_printf(", wired"); 4051 } 4052 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4053 db_printf(", share=%p, offset=0x%jx\n", 4054 (void *)entry->object.sub_map, 4055 (uintmax_t)entry->offset); 4056 if ((entry->prev == &map->header) || 4057 (entry->prev->object.sub_map != 4058 entry->object.sub_map)) { 4059 db_indent += 2; 4060 vm_map_print((vm_map_t)entry->object.sub_map); 4061 db_indent -= 2; 4062 } 4063 } else { 4064 if (entry->cred != NULL) 4065 db_printf(", ruid %d", entry->cred->cr_ruid); 4066 db_printf(", object=%p, offset=0x%jx", 4067 (void *)entry->object.vm_object, 4068 (uintmax_t)entry->offset); 4069 if (entry->object.vm_object && entry->object.vm_object->cred) 4070 db_printf(", obj ruid %d charge %jx", 4071 entry->object.vm_object->cred->cr_ruid, 4072 (uintmax_t)entry->object.vm_object->charge); 4073 if (entry->eflags & MAP_ENTRY_COW) 4074 db_printf(", copy (%s)", 4075 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4076 db_printf("\n"); 4077 4078 if ((entry->prev == &map->header) || 4079 (entry->prev->object.vm_object != 4080 entry->object.vm_object)) { 4081 db_indent += 2; 4082 vm_object_print((db_expr_t)(intptr_t) 4083 entry->object.vm_object, 4084 1, 0, (char *)0); 4085 db_indent -= 2; 4086 } 4087 } 4088 } 4089 db_indent -= 2; 4090 } 4091 4092 DB_SHOW_COMMAND(map, map) 4093 { 4094 4095 if (!have_addr) { 4096 db_printf("usage: show map <addr>\n"); 4097 return; 4098 } 4099 vm_map_print((vm_map_t)addr); 4100 } 4101 4102 DB_SHOW_COMMAND(procvm, procvm) 4103 { 4104 struct proc *p; 4105 4106 if (have_addr) { 4107 p = (struct proc *) addr; 4108 } else { 4109 p = curproc; 4110 } 4111 4112 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4113 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4114 (void *)vmspace_pmap(p->p_vmspace)); 4115 4116 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 4117 } 4118 4119 #endif /* DDB */ 4120