/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int size, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

#define	ENTRY_CHARGED(e) ((e)->uip != NULL || \
	((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
	!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
	{						\
	if (start < vm_map_min(map))			\
		start = vm_map_min(map);		\
	if (end > vm_map_max(map))			\
		end = vm_map_max(map);			\
	if (start > end)				\
		start = end;				\
	}
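
/*
 * Usage sketch (illustrative values, not from the surrounding code):
 * VM_MAP_RANGE_CHECK() silently clamps a caller-supplied region to the
 * map's valid range instead of failing.  For a map with
 * vm_map_min(map) == 0x1000 and vm_map_max(map) == 0x8000:
 *
 *	vm_offset_t start = 0x0800, end = 0x9000;
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	// now start == 0x1000 and end == 0x8000
 */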

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static void
vmspace_zfini(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;
	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	return (0);
}

static void
vm_map_zfini(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	mtx_destroy(&map->system_mtx);
	sx_destroy(&map->lock);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	map->nentries = 0;
	map->size = 0;
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

void
vm_init2(void)
{
	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
	    maxproc * 2 + maxfiles);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}
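
/*
 * Usage sketch (assumed caller pattern, not code from this file):
 * taking a transient reference to another process's vmspace pairs
 * vmspace_acquire_ref(), defined below, with the vmspace_free() above.
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm != NULL) {
 *		... inspect vm->vm_map ...
 *		vmspace_free(vm);
 *	}
 */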

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else
		(void)_sx_xlock(&map->lock, 0, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry;

	td = curthread;

	while ((entry = td->td_map_def_user) != NULL) {
		td->td_map_def_user = entry->next;
		vm_map_entry_deallocate(entry, FALSE);
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else {
		_sx_xunlock(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else
		(void)_sx_slock(&map->lock, 0, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else {
		_sx_sunlock(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    !_sx_try_xlock(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}
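
/*
 * Usage sketch (assumed pattern, not code from this file): callers use
 * the vm_map_lock()/vm_map_unlock() macros from vm_map.h, which supply
 * LOCK_FILE and LOCK_LINE to the underscore-prefixed functions above.
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	... clip and modify entries ...
 *	vm_map_unlock(map);	// also drains deferred entry frees
 */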

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    !_sx_try_slock(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else {
		if (!_sx_try_upgrade(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			_sx_sunlock(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			(void)_sx_xlock(&map->lock, 0, file, line);
			if (last_timestamp != map->timestamp) {
				_sx_xunlock(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
#ifdef INVARIANTS
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
	} else
		_sx_downgrade(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
	else
		_sx_assert(&map->lock, SA_XLOCKED, file, line);
}

#if 0
static void
_vm_map_assert_locked_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
	else
		_sx_assert(&map->lock, SA_SLOCKED, file, line);
}
#endif

#define	VM_MAP_ASSERT_LOCKED(map) \
	_vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#define	VM_MAP_ASSERT_LOCKED_READ(map) \
	_vm_map_assert_locked_read(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define	VM_MAP_ASSERT_LOCKED_READ(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else
		_sx_xunlock(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

long
vmspace_wired_count(struct vmspace *vmspace)
{
	return pmap_wired_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and this routine leaves it locked.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
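
/*
 * Illustrative sketch (assumed tree shapes, not from the original
 * source): splaying around an addr inside entry "b" brings "b" to the
 * root while preserving BST order; here b's right subtree is assumed
 * empty:
 *
 *	      c                      b
 *	     / \      splay(b)      / \
 *	    b   d     ------->     a   c
 *	   /                            \
 *	  a                              d
 *
 * max_free is then recomputed bottom-up along the touched spines.
 */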

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and this routine leaves it locked.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}
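
/*
 * Invariant sketch (restating the fields maintained above, not new
 * code): for any entry e in the tree,
 *
 *	e->adj_free == (e->next == &map->header ?
 *	    map->max_offset : e->next->start) - e->end;
 *	e->max_free == max(e->adj_free,
 *	    e->left ? e->left->max_free : 0,
 *	    e->right ? e->right->max_free : 0);
 *
 * so max_free summarizes the largest gap anywhere in e's subtree and
 * lets vm_map_findspace() prune whole subtrees during its search.
 */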

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
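
/*
 * Usage sketch (mirroring callers later in this file): the lookup
 * result is typically combined with clipping so that "entry" starts
 * exactly at the requested address:
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 */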

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
    int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	struct uidinfo *uip;
	boolean_t charge_prev_obj;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	charge_prev_obj = FALSE;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
		    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	uip = NULL;
	KASSERT((object != kmem_object && object != kernel_object) ||
	    ((object == kmem_object || object == kernel_object) &&
	    !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
	    ("kmem or kernel object and cow"));
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->uip == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		uip = curthread->td_ucred->cr_ruidinfo;
		uihold(uip);
		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
			charge_prev_obj = TRUE;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_LOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
	    (prev_entry->eflags == protoeflags) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0) &&
	    (prev_entry->uip == uip ||
	    (prev_entry->object.vm_object != NULL &&
	    (prev_entry->object.vm_object->uip == uip))) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			if (uip != NULL)
				uifree(uip);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (uip != NULL && object != NULL && object->uip != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			uifree(uip);
			uip = NULL;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->uip = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->uip = uip;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
		    object, OFF_TO_IDX(offset), end - start,
		    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
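
/*
 * Usage sketch (assumed caller, not code from this file): mapping a
 * previously allocated object at a fixed address.  Per the contract
 * above, the caller locks the map and takes the object reference:
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, 0, start, start + size,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */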

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and this routine leaves it locked.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	(void) vm_map_delete(map, start, end);
	result = vm_map_insert(map, object, offset, start, end, prot,
	    max, cow);
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, int find_space, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result;

	start = *addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr)) {
				vm_map_unlock(map);
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_ALIGNED_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
#ifdef VMFS_TLB_ALIGNED_SPACE
			case VMFS_TLB_ALIGNED_SPACE:
				pmap_align_tlb(addr);
				break;
#endif
			default:
				break;
			}

			start = *addr;
		}
		result = vm_map_insert(map, object, offset, start, start +
		    length, prot, max, cow);
	} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE
#ifdef VMFS_TLB_ALIGNED_SPACE
	    || find_space == VMFS_TLB_ALIGNED_SPACE
#endif
	    ));
	vm_map_unlock(map);
	return (result);
}
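
/*
 * Usage sketch (assumed caller, not code from this file): an anonymous
 * mapping that lets the map choose an address at or above a hint;
 * VMFS_ANY_SPACE is assumed to be the usual "anywhere" policy from
 * vm_map.h.
 *
 *	vm_offset_t addr = hint;
 *	int rv;
 *
 *	rv = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv == KERN_SUCCESS)
 *		... addr holds the start of the new mapping ...
 */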

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ((prev->end == entry->start) &&
		    (prev->object.vm_object == entry->object.vm_object) &&
		    (!prev->object.vm_object ||
		    (prev->offset + prevsize == entry->offset)) &&
		    (prev->eflags == entry->eflags) &&
		    (prev->protection == entry->protection) &&
		    (prev->max_protection == entry->max_protection) &&
		    (prev->inheritance == entry->inheritance) &&
		    (prev->wired_count == entry->wired_count) &&
		    (prev->uip == entry->uip)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->uip != NULL)
				uifree(prev->uip);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!entry->object.vm_object ||
		    (entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->uip == entry->uip)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->uip != NULL)
				uifree(next->uip);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->uip != NULL) {
			object->uip = entry->uip;
			object->charge = entry->end - entry->start;
			entry->uip = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->uip != NULL) {
		VM_OBJECT_LOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->uip == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
		entry->object.vm_object->uip = entry->uip;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_UNLOCK(entry->object.vm_object);
		entry->uip = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->uip != NULL)
		uihold(entry->uip);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->uip != NULL) {
			object->uip = entry->uip;
			object->charge = entry->end - entry->start;
			entry->uip = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->uip != NULL) {
		VM_OBJECT_LOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->uip == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
		entry->object.vm_object->uip = entry->uip;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_UNLOCK(entry->object.vm_object);
		entry->uip = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->uip != NULL)
		uihold(entry->uip);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}
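
/*
 * Usage sketch (assumed pattern, cf. kmem_suballoc() in vm_kern.c):
 * a submap is created and wired into its parent roughly as follows;
 * the range must first be reserved in the parent map.
 *
 *	submap = vm_map_create(vm_map_pmap(parent), min, max);
 *	... reserve [min, max) in "parent", e.g. with vm_map_find() ...
 *	if (vm_map_submap(parent, min, max, submap) != KERN_SUCCESS)
 *		panic("submap setup failed");
 */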

/*
 * The maximum number of pages to map
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload read-only mappings for the given object's resident pages into
 *	the given map.  This eliminates the soft faults on process startup and
 *	immediately after an mmap(2).  Because these are speculative mappings,
 *	cached pages are not reactivated and mapped.
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t start;
	vm_page_t p, p_start;
	vm_pindex_t psize, tmpidx;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
		goto unlock_return;
	}

	psize = atop(size);

	if ((flags & MAP_PREFAULT_PARTIAL) && psize > MAX_INIT_PT &&
	    object->resident_page_count > MAX_INIT_PT)
		goto unlock_return;

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			goto unlock_return;
		psize = object->size - pindex;
	}

	start = 0;
	p_start = NULL;

	p = vm_page_find_least(object, pindex);
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * Don't allow madvise to blow away our really
		 * free pages by allocating pv entries.
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			psize = tmpidx;
			break;
		}
		if (p->valid == VM_PAGE_BITS_ALL) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
unlock_return:
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current, entry;
	vm_object_t obj;
	struct uidinfo *uip;
	vm_prot_t old_prot;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Do an accounting pass for private read-only mappings that
	 * now will do cow due to allowed write (e.g. a debugger sets a
	 * breakpoint on a text segment).
	 */
	for (current = entry; (current != &map->header) &&
	    (current->start < end); current = current->next) {

		vm_map_clip_end(map, current, end);

		if (set_max ||
		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
		    ENTRY_CHARGED(current)) {
			continue;
		}

		uip = curthread->td_ucred->cr_ruidinfo;
		obj = current->object.vm_object;

		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
			if (!swap_reserve(current->end - current->start)) {
				vm_map_unlock(map);
				return (KERN_RESOURCE_SHORTAGE);
			}
			uihold(uip);
			current->uip = uip;
			continue;
		}

		VM_OBJECT_LOCK(obj);
		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
			VM_OBJECT_UNLOCK(obj);
			continue;
		}

		/*
		 * Charge for the whole object allocation now, since
		 * we cannot distinguish between non-charged and
		 * charged clipped mapping of the same object later.
		 */
		KASSERT(obj->charge == 0,
		    ("vm_map_protect: object %p overcharged\n", obj));
		if (!swap_reserve(ptoa(obj->size))) {
			VM_OBJECT_UNLOCK(obj);
			vm_map_unlock(map);
			return (KERN_RESOURCE_SHORTAGE);
		}

		uihold(uip);
		obj->uip = uip;
		obj->charge = ptoa(obj->size);
		VM_OBJECT_UNLOCK(obj);
	}

	/*
	 * Go back and fix up protections.  [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		old_prot = current->protection;

		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
		    == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
		    (current->protection & VM_PROT_WRITE) != 0 &&
		    (old_prot & VM_PROT_WRITE) == 0) {
			vm_fault_copy_entry(map, map, current, current, NULL);
		}

		/*
		 * When restricting access, update the physical map.  Worry
		 * about copy-on-write here.
		 */
		if ((old_prot & ~current->protection) != 0) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
			 VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
1961 */ 1962 switch(behav) { 1963 case MADV_NORMAL: 1964 case MADV_SEQUENTIAL: 1965 case MADV_RANDOM: 1966 case MADV_NOSYNC: 1967 case MADV_AUTOSYNC: 1968 case MADV_NOCORE: 1969 case MADV_CORE: 1970 modify_map = 1; 1971 vm_map_lock(map); 1972 break; 1973 case MADV_WILLNEED: 1974 case MADV_DONTNEED: 1975 case MADV_FREE: 1976 vm_map_lock_read(map); 1977 break; 1978 default: 1979 return (KERN_INVALID_ARGUMENT); 1980 } 1981 1982 /* 1983 * Locate starting entry and clip if necessary. 1984 */ 1985 VM_MAP_RANGE_CHECK(map, start, end); 1986 1987 if (vm_map_lookup_entry(map, start, &entry)) { 1988 if (modify_map) 1989 vm_map_clip_start(map, entry, start); 1990 } else { 1991 entry = entry->next; 1992 } 1993 1994 if (modify_map) { 1995 /* 1996 * madvise behaviors that are implemented in the vm_map_entry. 1997 * 1998 * We clip the vm_map_entry so that behavioral changes are 1999 * limited to the specified address range. 2000 */ 2001 for (current = entry; 2002 (current != &map->header) && (current->start < end); 2003 current = current->next 2004 ) { 2005 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2006 continue; 2007 2008 vm_map_clip_end(map, current, end); 2009 2010 switch (behav) { 2011 case MADV_NORMAL: 2012 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2013 break; 2014 case MADV_SEQUENTIAL: 2015 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2016 break; 2017 case MADV_RANDOM: 2018 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2019 break; 2020 case MADV_NOSYNC: 2021 current->eflags |= MAP_ENTRY_NOSYNC; 2022 break; 2023 case MADV_AUTOSYNC: 2024 current->eflags &= ~MAP_ENTRY_NOSYNC; 2025 break; 2026 case MADV_NOCORE: 2027 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2028 break; 2029 case MADV_CORE: 2030 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2031 break; 2032 default: 2033 break; 2034 } 2035 vm_map_simplify_entry(map, current); 2036 } 2037 vm_map_unlock(map); 2038 } else { 2039 vm_pindex_t pindex; 2040 int count; 2041 2042 /* 2043 * madvise behaviors that are implemented in the underlying 2044 * vm_object. 2045 * 2046 * Since we don't clip the vm_map_entry, we have to clip 2047 * the vm_object pindex and count. 2048 */ 2049 for (current = entry; 2050 (current != &map->header) && (current->start < end); 2051 current = current->next 2052 ) { 2053 vm_offset_t useStart; 2054 2055 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2056 continue; 2057 2058 pindex = OFF_TO_IDX(current->offset); 2059 count = atop(current->end - current->start); 2060 useStart = current->start; 2061 2062 if (current->start < start) { 2063 pindex += atop(start - current->start); 2064 count -= atop(start - current->start); 2065 useStart = start; 2066 } 2067 if (current->end > end) 2068 count -= atop(current->end - end); 2069 2070 if (count <= 0) 2071 continue; 2072 2073 vm_object_madvise(current->object.vm_object, 2074 pindex, count, behav); 2075 if (behav == MADV_WILLNEED) { 2076 vm_map_pmap_enter(map, 2077 useStart, 2078 current->protection, 2079 current->object.vm_object, 2080 pindex, 2081 (count << PAGE_SHIFT), 2082 MAP_PREFAULT_MADVISE 2083 ); 2084 } 2085 } 2086 vm_map_unlock_read(map); 2087 } 2088 return (0); 2089 } 2090 2091 2092 /* 2093 * vm_map_inherit: 2094 * 2095 * Sets the inheritance of the specified address 2096 * range in the target map. Inheritance 2097 * affects how the map will be shared with 2098 * child maps at the time of vmspace_fork. 
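* For example, after vm_map_inherit(map, start, end, VM_INHERIT_SHARE), * a child created by vmspace_fork() shares the object backing the * range with its parent, while VM_INHERIT_COPY gives the child a * copy-on-write copy and VM_INHERIT_NONE leaves the range unmapped * in the child.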
2099 */ 2100 int 2101 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2102 vm_inherit_t new_inheritance) 2103 { 2104 vm_map_entry_t entry; 2105 vm_map_entry_t temp_entry; 2106 2107 switch (new_inheritance) { 2108 case VM_INHERIT_NONE: 2109 case VM_INHERIT_COPY: 2110 case VM_INHERIT_SHARE: 2111 break; 2112 default: 2113 return (KERN_INVALID_ARGUMENT); 2114 } 2115 vm_map_lock(map); 2116 VM_MAP_RANGE_CHECK(map, start, end); 2117 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2118 entry = temp_entry; 2119 vm_map_clip_start(map, entry, start); 2120 } else 2121 entry = temp_entry->next; 2122 while ((entry != &map->header) && (entry->start < end)) { 2123 vm_map_clip_end(map, entry, end); 2124 entry->inheritance = new_inheritance; 2125 vm_map_simplify_entry(map, entry); 2126 entry = entry->next; 2127 } 2128 vm_map_unlock(map); 2129 return (KERN_SUCCESS); 2130 } 2131 2132 /* 2133 * vm_map_unwire: 2134 * 2135 * Implements both kernel and user unwiring. 2136 */ 2137 int 2138 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2139 int flags) 2140 { 2141 vm_map_entry_t entry, first_entry, tmp_entry; 2142 vm_offset_t saved_start; 2143 unsigned int last_timestamp; 2144 int rv; 2145 boolean_t need_wakeup, result, user_unwire; 2146 2147 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2148 vm_map_lock(map); 2149 VM_MAP_RANGE_CHECK(map, start, end); 2150 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2151 if (flags & VM_MAP_WIRE_HOLESOK) 2152 first_entry = first_entry->next; 2153 else { 2154 vm_map_unlock(map); 2155 return (KERN_INVALID_ADDRESS); 2156 } 2157 } 2158 last_timestamp = map->timestamp; 2159 entry = first_entry; 2160 while (entry != &map->header && entry->start < end) { 2161 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2162 /* 2163 * We have not yet clipped the entry. 2164 */ 2165 saved_start = (start >= entry->start) ? start : 2166 entry->start; 2167 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2168 if (vm_map_unlock_and_wait(map, 0)) { 2169 /* 2170 * Allow interruption of user unwiring? 2171 */ 2172 } 2173 vm_map_lock(map); 2174 if (last_timestamp+1 != map->timestamp) { 2175 /* 2176 * Look again for the entry because the map was 2177 * modified while it was unlocked. 2178 * Specifically, the entry may have been 2179 * clipped, merged, or deleted. 2180 */ 2181 if (!vm_map_lookup_entry(map, saved_start, 2182 &tmp_entry)) { 2183 if (flags & VM_MAP_WIRE_HOLESOK) 2184 tmp_entry = tmp_entry->next; 2185 else { 2186 if (saved_start == start) { 2187 /* 2188 * First_entry has been deleted. 2189 */ 2190 vm_map_unlock(map); 2191 return (KERN_INVALID_ADDRESS); 2192 } 2193 end = saved_start; 2194 rv = KERN_INVALID_ADDRESS; 2195 goto done; 2196 } 2197 } 2198 if (entry == first_entry) 2199 first_entry = tmp_entry; 2200 else 2201 first_entry = NULL; 2202 entry = tmp_entry; 2203 } 2204 last_timestamp = map->timestamp; 2205 continue; 2206 } 2207 vm_map_clip_start(map, entry, start); 2208 vm_map_clip_end(map, entry, end); 2209 /* 2210 * Mark the entry in case the map lock is released. (See 2211 * above.) 2212 */ 2213 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2214 /* 2215 * Check the map for holes in the specified region. 2216 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
2217 */ 2218 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2219 (entry->end < end && (entry->next == &map->header || 2220 entry->next->start > entry->end))) { 2221 end = entry->end; 2222 rv = KERN_INVALID_ADDRESS; 2223 goto done; 2224 } 2225 /* 2226 * If system unwiring, require that the entry is system wired. 2227 */ 2228 if (!user_unwire && 2229 vm_map_entry_system_wired_count(entry) == 0) { 2230 end = entry->end; 2231 rv = KERN_INVALID_ARGUMENT; 2232 goto done; 2233 } 2234 entry = entry->next; 2235 } 2236 rv = KERN_SUCCESS; 2237 done: 2238 need_wakeup = FALSE; 2239 if (first_entry == NULL) { 2240 result = vm_map_lookup_entry(map, start, &first_entry); 2241 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2242 first_entry = first_entry->next; 2243 else 2244 KASSERT(result, ("vm_map_unwire: lookup failed")); 2245 } 2246 entry = first_entry; 2247 while (entry != &map->header && entry->start < end) { 2248 if (rv == KERN_SUCCESS && (!user_unwire || 2249 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2250 if (user_unwire) 2251 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2252 entry->wired_count--; 2253 if (entry->wired_count == 0) { 2254 /* 2255 * Retain the map lock. 2256 */ 2257 vm_fault_unwire(map, entry->start, entry->end, 2258 entry->object.vm_object != NULL && 2259 (entry->object.vm_object->type == OBJT_DEVICE || 2260 entry->object.vm_object->type == OBJT_SG)); 2261 } 2262 } 2263 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2264 ("vm_map_unwire: in-transition flag missing")); 2265 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2266 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2267 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2268 need_wakeup = TRUE; 2269 } 2270 vm_map_simplify_entry(map, entry); 2271 entry = entry->next; 2272 } 2273 vm_map_unlock(map); 2274 if (need_wakeup) 2275 vm_map_wakeup(map); 2276 return (rv); 2277 } 2278 2279 /* 2280 * vm_map_wire: 2281 * 2282 * Implements both kernel and user wiring. 2283 */ 2284 int 2285 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2286 int flags) 2287 { 2288 vm_map_entry_t entry, first_entry, tmp_entry; 2289 vm_offset_t saved_end, saved_start; 2290 unsigned int last_timestamp; 2291 int rv; 2292 boolean_t fictitious, need_wakeup, result, user_wire; 2293 2294 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2295 vm_map_lock(map); 2296 VM_MAP_RANGE_CHECK(map, start, end); 2297 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2298 if (flags & VM_MAP_WIRE_HOLESOK) 2299 first_entry = first_entry->next; 2300 else { 2301 vm_map_unlock(map); 2302 return (KERN_INVALID_ADDRESS); 2303 } 2304 } 2305 last_timestamp = map->timestamp; 2306 entry = first_entry; 2307 while (entry != &map->header && entry->start < end) { 2308 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2309 /* 2310 * We have not yet clipped the entry. 2311 */ 2312 saved_start = (start >= entry->start) ? start : 2313 entry->start; 2314 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2315 if (vm_map_unlock_and_wait(map, 0)) { 2316 /* 2317 * Allow interruption of user wiring? 2318 */ 2319 } 2320 vm_map_lock(map); 2321 if (last_timestamp + 1 != map->timestamp) { 2322 /* 2323 * Look again for the entry because the map was 2324 * modified while it was unlocked. 2325 * Specifically, the entry may have been 2326 * clipped, merged, or deleted. 2327 */ 2328 if (!vm_map_lookup_entry(map, saved_start, 2329 &tmp_entry)) { 2330 if (flags & VM_MAP_WIRE_HOLESOK) 2331 tmp_entry = tmp_entry->next; 2332 else { 2333 if (saved_start == start) { 2334 /* 2335 * first_entry has been deleted. 
2336 */ 2337 vm_map_unlock(map); 2338 return (KERN_INVALID_ADDRESS); 2339 } 2340 end = saved_start; 2341 rv = KERN_INVALID_ADDRESS; 2342 goto done; 2343 } 2344 } 2345 if (entry == first_entry) 2346 first_entry = tmp_entry; 2347 else 2348 first_entry = NULL; 2349 entry = tmp_entry; 2350 } 2351 last_timestamp = map->timestamp; 2352 continue; 2353 } 2354 vm_map_clip_start(map, entry, start); 2355 vm_map_clip_end(map, entry, end); 2356 /* 2357 * Mark the entry in case the map lock is released. (See 2358 * above.) 2359 */ 2360 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2361 /* 2362 * If the entry is not yet wired, wire it now; the map lock is 2363 * dropped and reacquired around vm_fault_wire() below. 2364 */ 2364 if (entry->wired_count == 0) { 2365 if ((entry->protection & (VM_PROT_READ|VM_PROT_EXECUTE)) 2366 == 0) { 2367 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2368 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2369 end = entry->end; 2370 rv = KERN_INVALID_ADDRESS; 2371 goto done; 2372 } 2373 goto next_entry; 2374 } 2375 entry->wired_count++; 2376 saved_start = entry->start; 2377 saved_end = entry->end; 2378 fictitious = entry->object.vm_object != NULL && 2379 (entry->object.vm_object->type == OBJT_DEVICE || 2380 entry->object.vm_object->type == OBJT_SG); 2381 /* 2382 * Release the map lock, relying on the in-transition 2383 * mark. 2384 */ 2385 vm_map_unlock(map); 2386 rv = vm_fault_wire(map, saved_start, saved_end, 2387 fictitious); 2388 vm_map_lock(map); 2389 if (last_timestamp + 1 != map->timestamp) { 2390 /* 2391 * Look again for the entry because the map was 2392 * modified while it was unlocked. The entry 2393 * may have been clipped, but NOT merged or 2394 * deleted. 2395 */ 2396 result = vm_map_lookup_entry(map, saved_start, 2397 &tmp_entry); 2398 KASSERT(result, ("vm_map_wire: lookup failed")); 2399 if (entry == first_entry) 2400 first_entry = tmp_entry; 2401 else 2402 first_entry = NULL; 2403 entry = tmp_entry; 2404 while (entry->end < saved_end) { 2405 if (rv != KERN_SUCCESS) { 2406 KASSERT(entry->wired_count == 1, 2407 ("vm_map_wire: bad count")); 2408 entry->wired_count = -1; 2409 } 2410 entry = entry->next; 2411 } 2412 } 2413 last_timestamp = map->timestamp; 2414 if (rv != KERN_SUCCESS) { 2415 KASSERT(entry->wired_count == 1, 2416 ("vm_map_wire: bad count")); 2417 /* 2418 * Assign an out-of-range value to represent 2419 * the failure to wire this entry. 2420 */ 2421 entry->wired_count = -1; 2422 end = entry->end; 2423 goto done; 2424 } 2425 } else if (!user_wire || 2426 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2427 entry->wired_count++; 2428 } 2429 /* 2430 * Check the map for holes in the specified region. 2431 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2432 */ 2433 next_entry: 2434 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2435 (entry->end < end && (entry->next == &map->header || 2436 entry->next->start > entry->end))) { 2437 end = entry->end; 2438 rv = KERN_INVALID_ADDRESS; 2439 goto done; 2440 } 2441 entry = entry->next; 2442 } 2443 rv = KERN_SUCCESS; 2444 done: 2445 need_wakeup = FALSE; 2446 if (first_entry == NULL) { 2447 result = vm_map_lookup_entry(map, start, &first_entry); 2448 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2449 first_entry = first_entry->next; 2450 else 2451 KASSERT(result, ("vm_map_wire: lookup failed")); 2452 } 2453 entry = first_entry; 2454 while (entry != &map->header && entry->start < end) { 2455 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2456 goto next_entry_done; 2457 if (rv == KERN_SUCCESS) { 2458 if (user_wire) 2459 entry->eflags |= MAP_ENTRY_USER_WIRED; 2460 } else if (entry->wired_count == -1) { 2461 /* 2462 * Wiring failed on this entry. Thus, unwiring is 2463 * unnecessary. 2464 */ 2465 entry->wired_count = 0; 2466 } else { 2467 if (!user_wire || 2468 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 2469 entry->wired_count--; 2470 if (entry->wired_count == 0) { 2471 /* 2472 * Retain the map lock. 2473 */ 2474 vm_fault_unwire(map, entry->start, entry->end, 2475 entry->object.vm_object != NULL && 2476 (entry->object.vm_object->type == OBJT_DEVICE || 2477 entry->object.vm_object->type == OBJT_SG)); 2478 } 2479 } 2480 next_entry_done: 2481 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2482 ("vm_map_wire: in-transition flag missing")); 2483 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED); 2484 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2485 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2486 need_wakeup = TRUE; 2487 } 2488 vm_map_simplify_entry(map, entry); 2489 entry = entry->next; 2490 } 2491 vm_map_unlock(map); 2492 if (need_wakeup) 2493 vm_map_wakeup(map); 2494 return (rv); 2495 } 2496 2497 /* 2498 * vm_map_sync 2499 * 2500 * Push any dirty cached pages in the address range to their pager. 2501 * If syncio is TRUE, dirty pages are written synchronously. 2502 * If invalidate is TRUE, any cached pages are freed as well. 2503 * 2504 * If the size of the region from start to end is zero, we are 2505 * supposed to flush all modified pages within the region containing 2506 * start. Unfortunately, a region can be split or coalesced with 2507 * neighboring regions, making it difficult to determine what the 2508 * original region was. Therefore, we approximate this requirement by 2509 * flushing the current region containing start. 2510 * 2511 * Returns an error if any part of the specified range is not mapped. 2512 */ 2513 int 2514 vm_map_sync( 2515 vm_map_t map, 2516 vm_offset_t start, 2517 vm_offset_t end, 2518 boolean_t syncio, 2519 boolean_t invalidate) 2520 { 2521 vm_map_entry_t current; 2522 vm_map_entry_t entry; 2523 vm_size_t size; 2524 vm_object_t object; 2525 vm_ooffset_t offset; 2526 unsigned int last_timestamp; 2527 2528 vm_map_lock_read(map); 2529 VM_MAP_RANGE_CHECK(map, start, end); 2530 if (!vm_map_lookup_entry(map, start, &entry)) { 2531 vm_map_unlock_read(map); 2532 return (KERN_INVALID_ADDRESS); 2533 } else if (start == end) { 2534 start = entry->start; 2535 end = entry->end; 2536 } 2537 /* 2538 * Make a first pass to check for user-wired memory and holes. 
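* User-wired entries must not be invalidated (that case fails with * KERN_INVALID_ARGUMENT), and any hole in the range fails the entire * request with KERN_INVALID_ADDRESS.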
2539 */ 2540 for (current = entry; current != &map->header && current->start < end; 2541 current = current->next) { 2542 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2543 vm_map_unlock_read(map); 2544 return (KERN_INVALID_ARGUMENT); 2545 } 2546 if (end > current->end && 2547 (current->next == &map->header || 2548 current->end != current->next->start)) { 2549 vm_map_unlock_read(map); 2550 return (KERN_INVALID_ADDRESS); 2551 } 2552 } 2553 2554 if (invalidate) 2555 pmap_remove(map->pmap, start, end); 2556 2557 /* 2558 * Make a second pass, cleaning/uncaching pages from the indicated 2559 * objects as we go. 2560 */ 2561 for (current = entry; current != &map->header && current->start < end;) { 2562 offset = current->offset + (start - current->start); 2563 size = (end <= current->end ? end : current->end) - start; 2564 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2565 vm_map_t smap; 2566 vm_map_entry_t tentry; 2567 vm_size_t tsize; 2568 2569 smap = current->object.sub_map; 2570 vm_map_lock_read(smap); 2571 (void) vm_map_lookup_entry(smap, offset, &tentry); 2572 tsize = tentry->end - offset; 2573 if (tsize < size) 2574 size = tsize; 2575 object = tentry->object.vm_object; 2576 offset = tentry->offset + (offset - tentry->start); 2577 vm_map_unlock_read(smap); 2578 } else { 2579 object = current->object.vm_object; 2580 } 2581 vm_object_reference(object); 2582 last_timestamp = map->timestamp; 2583 vm_map_unlock_read(map); 2584 vm_object_sync(object, offset, size, syncio, invalidate); 2585 start += size; 2586 vm_object_deallocate(object); 2587 vm_map_lock_read(map); 2588 if (last_timestamp == map->timestamp || 2589 !vm_map_lookup_entry(map, start, &current)) 2590 current = current->next; 2591 } 2592 2593 vm_map_unlock_read(map); 2594 return (KERN_SUCCESS); 2595 } 2596 2597 /* 2598 * vm_map_entry_unwire: [ internal use only ] 2599 * 2600 * Make the region specified by this entry pageable. 2601 * 2602 * The map in question should be locked. 2603 * [This is the reason for this routine's existence.] 2604 */ 2605 static void 2606 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2607 { 2608 vm_fault_unwire(map, entry->start, entry->end, 2609 entry->object.vm_object != NULL && 2610 (entry->object.vm_object->type == OBJT_DEVICE || 2611 entry->object.vm_object->type == OBJT_SG)); 2612 entry->wired_count = 0; 2613 } 2614 2615 static void 2616 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2617 { 2618 2619 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2620 vm_object_deallocate(entry->object.vm_object); 2621 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 2622 } 2623 2624 /* 2625 * vm_map_entry_delete: [ internal use only ] 2626 * 2627 * Deallocate the given entry from the target map.
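* The entry is unlinked from the map, its swap reservation, if any, * is released, and pages of a backing object that is mapped only by * this entry are freed immediately.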
2628 */ 2629 static void 2630 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2631 { 2632 vm_object_t object; 2633 vm_pindex_t offidxstart, offidxend, count, size1; 2634 vm_ooffset_t size; 2635 2636 vm_map_entry_unlink(map, entry); 2637 object = entry->object.vm_object; 2638 size = entry->end - entry->start; 2639 map->size -= size; 2640 2641 if (entry->uip != NULL) { 2642 swap_release_by_uid(size, entry->uip); 2643 uifree(entry->uip); 2644 } 2645 2646 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2647 (object != NULL)) { 2648 KASSERT(entry->uip == NULL || object->uip == NULL || 2649 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 2650 ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry)); 2651 count = OFF_TO_IDX(size); 2652 offidxstart = OFF_TO_IDX(entry->offset); 2653 offidxend = offidxstart + count; 2654 VM_OBJECT_LOCK(object); 2655 if (object->ref_count != 1 && 2656 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2657 object == kernel_object || object == kmem_object)) { 2658 vm_object_collapse(object); 2659 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2660 if (object->type == OBJT_SWAP) 2661 swap_pager_freespace(object, offidxstart, count); 2662 if (offidxend >= object->size && 2663 offidxstart < object->size) { 2664 size1 = object->size; 2665 object->size = offidxstart; 2666 if (object->uip != NULL) { 2667 size1 -= object->size; 2668 KASSERT(object->charge >= ptoa(size1), 2669 ("vm_map_entry_delete: object->charge < 0")); 2670 swap_release_by_uid(ptoa(size1), object->uip); 2671 object->charge -= ptoa(size1); 2672 } 2673 } 2674 } 2675 VM_OBJECT_UNLOCK(object); 2676 } else 2677 entry->object.vm_object = NULL; 2678 if (map->system_map) 2679 vm_map_entry_deallocate(entry, TRUE); 2680 else { 2681 entry->next = curthread->td_map_def_user; 2682 curthread->td_map_def_user = entry; 2683 } 2684 } 2685 2686 /* 2687 * vm_map_delete: [ internal use only ] 2688 * 2689 * Deallocates the given address range from the target 2690 * map. 2691 */ 2692 int 2693 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2694 { 2695 vm_map_entry_t entry; 2696 vm_map_entry_t first_entry; 2697 2698 VM_MAP_ASSERT_LOCKED(map); 2699 2700 /* 2701 * Find the start of the region, and clip it 2702 */ 2703 if (!vm_map_lookup_entry(map, start, &first_entry)) 2704 entry = first_entry->next; 2705 else { 2706 entry = first_entry; 2707 vm_map_clip_start(map, entry, start); 2708 } 2709 2710 /* 2711 * Step through all entries in this region 2712 */ 2713 while ((entry != &map->header) && (entry->start < end)) { 2714 vm_map_entry_t next; 2715 2716 /* 2717 * Wait for wiring or unwiring of an entry to complete. 2718 * Also wait for any system wirings to disappear on 2719 * user maps. 2720 */ 2721 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 2722 (vm_map_pmap(map) != kernel_pmap && 2723 vm_map_entry_system_wired_count(entry) != 0)) { 2724 unsigned int last_timestamp; 2725 vm_offset_t saved_start; 2726 vm_map_entry_t tmp_entry; 2727 2728 saved_start = entry->start; 2729 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2730 last_timestamp = map->timestamp; 2731 (void) vm_map_unlock_and_wait(map, 0); 2732 vm_map_lock(map); 2733 if (last_timestamp + 1 != map->timestamp) { 2734 /* 2735 * Look again for the entry because the map was 2736 * modified while it was unlocked. 2737 * Specifically, the entry may have been 2738 * clipped, merged, or deleted. 
2739 */ 2740 if (!vm_map_lookup_entry(map, saved_start, 2741 &tmp_entry)) 2742 entry = tmp_entry->next; 2743 else { 2744 entry = tmp_entry; 2745 vm_map_clip_start(map, entry, 2746 saved_start); 2747 } 2748 } 2749 continue; 2750 } 2751 vm_map_clip_end(map, entry, end); 2752 2753 next = entry->next; 2754 2755 /* 2756 * Unwire before removing addresses from the pmap; otherwise, 2757 * unwiring will put the entries back in the pmap. 2758 */ 2759 if (entry->wired_count != 0) { 2760 vm_map_entry_unwire(map, entry); 2761 } 2762 2763 pmap_remove(map->pmap, entry->start, entry->end); 2764 2765 /* 2766 * Delete the entry only after removing all pmap 2767 * entries pointing to its pages. (Otherwise, its 2768 * page frames may be reallocated, and any modify bits 2769 * will be set in the wrong object!) 2770 */ 2771 vm_map_entry_delete(map, entry); 2772 entry = next; 2773 } 2774 return (KERN_SUCCESS); 2775 } 2776 2777 /* 2778 * vm_map_remove: 2779 * 2780 * Remove the given address range from the target map. 2781 * This is the exported form of vm_map_delete. 2782 */ 2783 int 2784 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2785 { 2786 int result; 2787 2788 vm_map_lock(map); 2789 VM_MAP_RANGE_CHECK(map, start, end); 2790 result = vm_map_delete(map, start, end); 2791 vm_map_unlock(map); 2792 return (result); 2793 } 2794 2795 /* 2796 * vm_map_check_protection: 2797 * 2798 * Assert that the target map allows the specified privilege on the 2799 * entire address region given. The entire region must be allocated. 2800 * 2801 * WARNING! This code does not and should not check whether the 2802 * contents of the region is accessible. For example a smaller file 2803 * might be mapped into a larger address space. 2804 * 2805 * NOTE! This code is also called by munmap(). 2806 * 2807 * The map must be locked. A read lock is sufficient. 2808 */ 2809 boolean_t 2810 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2811 vm_prot_t protection) 2812 { 2813 vm_map_entry_t entry; 2814 vm_map_entry_t tmp_entry; 2815 2816 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2817 return (FALSE); 2818 entry = tmp_entry; 2819 2820 while (start < end) { 2821 if (entry == &map->header) 2822 return (FALSE); 2823 /* 2824 * No holes allowed! 2825 */ 2826 if (start < entry->start) 2827 return (FALSE); 2828 /* 2829 * Check protection associated with entry. 2830 */ 2831 if ((entry->protection & protection) != protection) 2832 return (FALSE); 2833 /* go to next entry */ 2834 start = entry->end; 2835 entry = entry->next; 2836 } 2837 return (TRUE); 2838 } 2839 2840 /* 2841 * vm_map_copy_entry: 2842 * 2843 * Copies the contents of the source entry to the destination 2844 * entry. The entries *must* be aligned properly. 2845 */ 2846 static void 2847 vm_map_copy_entry( 2848 vm_map_t src_map, 2849 vm_map_t dst_map, 2850 vm_map_entry_t src_entry, 2851 vm_map_entry_t dst_entry, 2852 vm_ooffset_t *fork_charge) 2853 { 2854 vm_object_t src_object; 2855 vm_offset_t size; 2856 struct uidinfo *uip; 2857 int charged; 2858 2859 VM_MAP_ASSERT_LOCKED(dst_map); 2860 2861 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2862 return; 2863 2864 if (src_entry->wired_count == 0) { 2865 2866 /* 2867 * If the source entry is marked needs_copy, it is already 2868 * write-protected. 
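* Otherwise, write access is removed from the source range in the * pmap below, so that the first write after the fork faults and * performs the actual copy.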
2869 */ 2870 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2871 pmap_protect(src_map->pmap, 2872 src_entry->start, 2873 src_entry->end, 2874 src_entry->protection & ~VM_PROT_WRITE); 2875 } 2876 2877 /* 2878 * Make a copy of the object. 2879 */ 2880 size = src_entry->end - src_entry->start; 2881 if ((src_object = src_entry->object.vm_object) != NULL) { 2882 VM_OBJECT_LOCK(src_object); 2883 charged = ENTRY_CHARGED(src_entry); 2884 if ((src_object->handle == NULL) && 2885 (src_object->type == OBJT_DEFAULT || 2886 src_object->type == OBJT_SWAP)) { 2887 vm_object_collapse(src_object); 2888 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2889 vm_object_split(src_entry); 2890 src_object = src_entry->object.vm_object; 2891 } 2892 } 2893 vm_object_reference_locked(src_object); 2894 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2895 if (src_entry->uip != NULL && 2896 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 2897 KASSERT(src_object->uip == NULL, 2898 ("OVERCOMMIT: vm_map_copy_entry: uip %p", 2899 src_object)); 2900 src_object->uip = src_entry->uip; 2901 src_object->charge = size; 2902 } 2903 VM_OBJECT_UNLOCK(src_object); 2904 dst_entry->object.vm_object = src_object; 2905 if (charged) { 2906 uip = curthread->td_ucred->cr_ruidinfo; 2907 uihold(uip); 2908 dst_entry->uip = uip; 2909 *fork_charge += size; 2910 if (!(src_entry->eflags & 2911 MAP_ENTRY_NEEDS_COPY)) { 2912 uihold(uip); 2913 src_entry->uip = uip; 2914 *fork_charge += size; 2915 } 2916 } 2917 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2918 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2919 dst_entry->offset = src_entry->offset; 2920 } else { 2921 dst_entry->object.vm_object = NULL; 2922 dst_entry->offset = 0; 2923 if (src_entry->uip != NULL) { 2924 dst_entry->uip = curthread->td_ucred->cr_ruidinfo; 2925 uihold(dst_entry->uip); 2926 *fork_charge += size; 2927 } 2928 } 2929 2930 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2931 dst_entry->end - dst_entry->start, src_entry->start); 2932 } else { 2933 /* 2934 * Of course, wired down pages can't be set copy-on-write. 2935 * Cause wired pages to be copied into the new map by 2936 * simulating faults (the new pages are pageable) 2937 */ 2938 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 2939 fork_charge); 2940 } 2941 } 2942 2943 /* 2944 * vmspace_map_entry_forked: 2945 * Update the newly-forked vmspace each time a map entry is inherited 2946 * or copied. The values for vm_dsize and vm_tsize are approximate 2947 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 
2948 */ 2949 static void 2950 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 2951 vm_map_entry_t entry) 2952 { 2953 vm_size_t entrysize; 2954 vm_offset_t newend; 2955 2956 entrysize = entry->end - entry->start; 2957 vm2->vm_map.size += entrysize; 2958 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 2959 vm2->vm_ssize += btoc(entrysize); 2960 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 2961 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 2962 newend = MIN(entry->end, 2963 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 2964 vm2->vm_dsize += btoc(newend - entry->start); 2965 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 2966 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 2967 newend = MIN(entry->end, 2968 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 2969 vm2->vm_tsize += btoc(newend - entry->start); 2970 } 2971 } 2972 2973 /* 2974 * vmspace_fork: 2975 * Create a new process vmspace structure and vm_map 2976 * based on those of an existing process. The new map 2977 * is based on the old map, according to the inheritance 2978 * values on the regions in that map. 2979 * 2980 * XXX It might be worth coalescing the entries added to the new vmspace. 2981 * 2982 * The source map must not be locked. 2983 */ 2984 struct vmspace * 2985 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 2986 { 2987 struct vmspace *vm2; 2988 vm_map_t old_map = &vm1->vm_map; 2989 vm_map_t new_map; 2990 vm_map_entry_t old_entry; 2991 vm_map_entry_t new_entry; 2992 vm_object_t object; 2993 int locked; 2994 2995 vm_map_lock(old_map); 2996 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2997 if (vm2 == NULL) 2998 goto unlock_and_return; 2999 vm2->vm_taddr = vm1->vm_taddr; 3000 vm2->vm_daddr = vm1->vm_daddr; 3001 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3002 new_map = &vm2->vm_map; /* XXX */ 3003 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3004 KASSERT(locked, ("vmspace_fork: lock failed")); 3005 new_map->timestamp = 1; 3006 3007 old_entry = old_map->header.next; 3008 3009 while (old_entry != &old_map->header) { 3010 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3011 panic("vm_map_fork: encountered a submap"); 3012 3013 switch (old_entry->inheritance) { 3014 case VM_INHERIT_NONE: 3015 break; 3016 3017 case VM_INHERIT_SHARE: 3018 /* 3019 * Clone the entry, creating the shared object if necessary. 3020 */ 3021 object = old_entry->object.vm_object; 3022 if (object == NULL) { 3023 object = vm_object_allocate(OBJT_DEFAULT, 3024 atop(old_entry->end - old_entry->start)); 3025 old_entry->object.vm_object = object; 3026 old_entry->offset = 0; 3027 if (old_entry->uip != NULL) { 3028 object->uip = old_entry->uip; 3029 object->charge = old_entry->end - 3030 old_entry->start; 3031 old_entry->uip = NULL; 3032 } 3033 } 3034 3035 /* 3036 * Add the reference before calling vm_object_shadow 3037 * to ensure that a shadow object is created. 3038 */ 3039 vm_object_reference(object); 3040 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3041 vm_object_shadow(&old_entry->object.vm_object, 3042 &old_entry->offset, 3043 atop(old_entry->end - old_entry->start)); 3044 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3045 /* Transfer the second reference too. */ 3046 vm_object_reference( 3047 old_entry->object.vm_object); 3048 3049 /* 3050 * As in vm_map_simplify_entry(), the 3051 * vnode lock will not be acquired in 3052 * this call to vm_object_deallocate().
3053 */ 3054 vm_object_deallocate(object); 3055 object = old_entry->object.vm_object; 3056 } 3057 VM_OBJECT_LOCK(object); 3058 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3059 if (old_entry->uip != NULL) { 3060 KASSERT(object->uip == NULL, ("vmspace_fork both uip")); 3061 object->uip = old_entry->uip; 3062 object->charge = old_entry->end - old_entry->start; 3063 old_entry->uip = NULL; 3064 } 3065 VM_OBJECT_UNLOCK(object); 3066 3067 /* 3068 * Clone the entry, referencing the shared object. 3069 */ 3070 new_entry = vm_map_entry_create(new_map); 3071 *new_entry = *old_entry; 3072 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3073 MAP_ENTRY_IN_TRANSITION); 3074 new_entry->wired_count = 0; 3075 3076 /* 3077 * Insert the entry into the new map -- we know we're 3078 * inserting at the end of the new map. 3079 */ 3080 vm_map_entry_link(new_map, new_map->header.prev, 3081 new_entry); 3082 vmspace_map_entry_forked(vm1, vm2, new_entry); 3083 3084 /* 3085 * Update the physical map 3086 */ 3087 pmap_copy(new_map->pmap, old_map->pmap, 3088 new_entry->start, 3089 (old_entry->end - old_entry->start), 3090 old_entry->start); 3091 break; 3092 3093 case VM_INHERIT_COPY: 3094 /* 3095 * Clone the entry and link into the map. 3096 */ 3097 new_entry = vm_map_entry_create(new_map); 3098 *new_entry = *old_entry; 3099 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3100 MAP_ENTRY_IN_TRANSITION); 3101 new_entry->wired_count = 0; 3102 new_entry->object.vm_object = NULL; 3103 new_entry->uip = NULL; 3104 vm_map_entry_link(new_map, new_map->header.prev, 3105 new_entry); 3106 vmspace_map_entry_forked(vm1, vm2, new_entry); 3107 vm_map_copy_entry(old_map, new_map, old_entry, 3108 new_entry, fork_charge); 3109 break; 3110 } 3111 old_entry = old_entry->next; 3112 } 3113 unlock_and_return: 3114 vm_map_unlock(old_map); 3115 if (vm2 != NULL) 3116 vm_map_unlock(new_map); 3117 3118 return (vm2); 3119 } 3120 3121 int 3122 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3123 vm_prot_t prot, vm_prot_t max, int cow) 3124 { 3125 vm_map_entry_t new_entry, prev_entry; 3126 vm_offset_t bot, top; 3127 vm_size_t init_ssize; 3128 int orient, rv; 3129 rlim_t vmemlim; 3130 3131 /* 3132 * The stack orientation is piggybacked with the cow argument. 3133 * Extract it into orient and mask the cow argument so that we 3134 * don't pass it around further. 3135 * NOTE: We explicitly allow bi-directional stacks. 3136 */ 3137 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); 3138 cow &= ~orient; 3139 KASSERT(orient != 0, ("No stack grow direction")); 3140 3141 if (addrbos < vm_map_min(map) || 3142 addrbos > vm_map_max(map) || 3143 addrbos + max_ssize < addrbos) 3144 return (KERN_NO_SPACE); 3145 3146 init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz; 3147 3148 PROC_LOCK(curthread->td_proc); 3149 vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM); 3150 PROC_UNLOCK(curthread->td_proc); 3151 3152 vm_map_lock(map); 3153 3154 /* If addr is already mapped, no go */ 3155 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 3156 vm_map_unlock(map); 3157 return (KERN_NO_SPACE); 3158 } 3159 3160 /* If we would blow our VMEM resource limit, no go */ 3161 if (map->size + init_ssize > vmemlim) { 3162 vm_map_unlock(map); 3163 return (KERN_NO_SPACE); 3164 } 3165 3166 /* 3167 * If we can't accommodate max_ssize in the current mapping, no go. 3168 * However, we need to be aware that subsequent user mappings might 3169 * map into the space we have reserved for stack, and currently this 3170 * space is not protected.
3171 * 3172 * Hopefully we will at least detect this condition when we try to 3173 * grow the stack. 3174 */ 3175 if ((prev_entry->next != &map->header) && 3176 (prev_entry->next->start < addrbos + max_ssize)) { 3177 vm_map_unlock(map); 3178 return (KERN_NO_SPACE); 3179 } 3180 3181 /* 3182 * We initially map a stack of only init_ssize. We will grow as 3183 * needed later. Depending on the orientation of the stack (i.e. 3184 * the grow direction) we either map at the top of the range, the 3185 * bottom of the range or in the middle. 3186 * 3187 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3188 * and cow to be 0. Possibly we should eliminate these as input 3189 * parameters, and just pass these values here in the insert call. 3190 */ 3191 if (orient == MAP_STACK_GROWS_DOWN) 3192 bot = addrbos + max_ssize - init_ssize; 3193 else if (orient == MAP_STACK_GROWS_UP) 3194 bot = addrbos; 3195 else 3196 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 3197 top = bot + init_ssize; 3198 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3199 3200 /* Now set the avail_ssize amount. */ 3201 if (rv == KERN_SUCCESS) { 3202 if (prev_entry != &map->header) 3203 vm_map_clip_end(map, prev_entry, bot); 3204 new_entry = prev_entry->next; 3205 if (new_entry->end != top || new_entry->start != bot) 3206 panic("Bad entry start/end for new stack entry"); 3207 3208 new_entry->avail_ssize = max_ssize - init_ssize; 3209 if (orient & MAP_STACK_GROWS_DOWN) 3210 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 3211 if (orient & MAP_STACK_GROWS_UP) 3212 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 3213 } 3214 3215 vm_map_unlock(map); 3216 return (rv); 3217 } 3218 3219 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3220 * desired address is already mapped, or if we successfully grow 3221 * the stack. Also returns KERN_SUCCESS if addr is outside the 3222 * stack range (this is strange, but preserves compatibility with 3223 * the grow function in vm_machdep.c). 3224 */ 3225 int 3226 vm_map_growstack(struct proc *p, vm_offset_t addr) 3227 { 3228 vm_map_entry_t next_entry, prev_entry; 3229 vm_map_entry_t new_entry, stack_entry; 3230 struct vmspace *vm = p->p_vmspace; 3231 vm_map_t map = &vm->vm_map; 3232 vm_offset_t end; 3233 size_t grow_amount, max_grow; 3234 rlim_t stacklim, vmemlim; 3235 int is_procstack, rv; 3236 struct uidinfo *uip; 3237 3238 Retry: 3239 PROC_LOCK(p); 3240 stacklim = lim_cur(p, RLIMIT_STACK); 3241 vmemlim = lim_cur(p, RLIMIT_VMEM); 3242 PROC_UNLOCK(p); 3243 3244 vm_map_lock_read(map); 3245 3246 /* If addr is already in the entry range, no need to grow. */ 3247 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 3248 vm_map_unlock_read(map); 3249 return (KERN_SUCCESS); 3250 } 3251 3252 next_entry = prev_entry->next; 3253 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 3254 /* 3255 * This entry does not grow upwards. Since the address lies 3256 * beyond this entry, the next entry (if one exists) has to 3257 * be a downward growable entry. The entry list header is 3258 * never a growable entry, so it suffices to check the flags. 3259 */ 3260 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 3261 vm_map_unlock_read(map); 3262 return (KERN_SUCCESS); 3263 } 3264 stack_entry = next_entry; 3265 } else { 3266 /* 3267 * This entry grows upward. If the next entry does not at 3268 * least grow downwards, this is the entry we need to grow; 3269 * otherwise we have two possible choices and we have to 3270 * select one.
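* (We select the entry whose boundary is closer to the faulting * address, which minimizes the amount of growth.)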
3271 */ 3272 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 3273 /* 3274 * We have two choices: grow the entry closest to 3275 * the address to minimize the amount of growth. 3276 */ 3277 if (addr - prev_entry->end <= next_entry->start - addr) 3278 stack_entry = prev_entry; 3279 else 3280 stack_entry = next_entry; 3281 } else 3282 stack_entry = prev_entry; 3283 } 3284 3285 if (stack_entry == next_entry) { 3286 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 3287 KASSERT(addr < stack_entry->start, ("foo")); 3288 end = (prev_entry != &map->header) ? prev_entry->end : 3289 stack_entry->start - stack_entry->avail_ssize; 3290 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 3291 max_grow = stack_entry->start - end; 3292 } else { 3293 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 3294 KASSERT(addr >= stack_entry->end, ("foo")); 3295 end = (next_entry != &map->header) ? next_entry->start : 3296 stack_entry->end + stack_entry->avail_ssize; 3297 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 3298 max_grow = end - stack_entry->end; 3299 } 3300 3301 if (grow_amount > stack_entry->avail_ssize) { 3302 vm_map_unlock_read(map); 3303 return (KERN_NO_SPACE); 3304 } 3305 3306 /* 3307 * If there is no longer enough space between the entries, no go; just 3308 * adjust the available space. Note: this should only happen if the 3309 * user has mapped into the stack area after the stack was created, 3310 * and is probably an error. 3311 * 3312 * This also effectively destroys any guard page the user might have 3313 * intended by limiting the stack size. 3314 */ 3315 if (grow_amount > max_grow) { 3316 if (vm_map_lock_upgrade(map)) 3317 goto Retry; 3318 3319 stack_entry->avail_ssize = max_grow; 3320 3321 vm_map_unlock(map); 3322 return (KERN_NO_SPACE); 3323 } 3324 3325 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 3326 3327 /* 3328 * If this is the main process stack, see if we're over the stack 3329 * limit. 3330 */ 3331 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3332 vm_map_unlock_read(map); 3333 return (KERN_NO_SPACE); 3334 } 3335 3336 /* Round up the grow amount to a multiple of sgrowsiz. */ 3337 grow_amount = roundup(grow_amount, sgrowsiz); 3338 if (grow_amount > stack_entry->avail_ssize) 3339 grow_amount = stack_entry->avail_ssize; 3340 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3341 grow_amount = trunc_page((vm_size_t)stacklim) - 3342 ctob(vm->vm_ssize); 3343 } 3344 3345 /* If we would blow our VMEM resource limit, no go */ 3346 if (map->size + grow_amount > vmemlim) { 3347 vm_map_unlock_read(map); 3348 return (KERN_NO_SPACE); 3349 } 3350 3351 if (vm_map_lock_upgrade(map)) 3352 goto Retry; 3353 3354 if (stack_entry == next_entry) { 3355 /* 3356 * Growing downward. 3357 */ 3358 /* Get the preliminary new entry start value */ 3359 addr = stack_entry->start - grow_amount; 3360 3361 /* 3362 * If this puts us into the previous entry, cut back our 3363 * growth to the available space. Also, see the note above. 3364 */ 3365 if (addr < end) { 3366 stack_entry->avail_ssize = max_grow; 3367 addr = end; 3368 } 3369 3370 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 3371 p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); 3372 3373 /* Adjust the available stack space by the amount we grew.
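* The MAP_ENTRY_GROWS_DOWN flag also moves from the old stack * entry to the newly inserted entry.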
*/ 3374 if (rv == KERN_SUCCESS) { 3375 if (prev_entry != &map->header) 3376 vm_map_clip_end(map, prev_entry, addr); 3377 new_entry = prev_entry->next; 3378 KASSERT(new_entry == stack_entry->prev, ("foo")); 3379 KASSERT(new_entry->end == stack_entry->start, ("foo")); 3380 KASSERT(new_entry->start == addr, ("foo")); 3381 grow_amount = new_entry->end - new_entry->start; 3382 new_entry->avail_ssize = stack_entry->avail_ssize - 3383 grow_amount; 3384 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 3385 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 3386 } 3387 } else { 3388 /* 3389 * Growing upward. 3390 */ 3391 addr = stack_entry->end + grow_amount; 3392 3393 /* 3394 * If this puts us into the next entry, cut back our growth 3395 * to the available space. Also, see the note above. 3396 */ 3397 if (addr > end) { 3398 stack_entry->avail_ssize = end - stack_entry->end; 3399 addr = end; 3400 } 3401 3402 grow_amount = addr - stack_entry->end; 3403 uip = stack_entry->uip; 3404 if (uip == NULL && stack_entry->object.vm_object != NULL) 3405 uip = stack_entry->object.vm_object->uip; 3406 if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip)) 3407 rv = KERN_NO_SPACE; 3408 /* Grow the underlying object if applicable. */ 3409 else if (stack_entry->object.vm_object == NULL || 3410 vm_object_coalesce(stack_entry->object.vm_object, 3411 stack_entry->offset, 3412 (vm_size_t)(stack_entry->end - stack_entry->start), 3413 (vm_size_t)grow_amount, uip != NULL)) { 3414 map->size += (addr - stack_entry->end); 3415 /* Update the current entry. */ 3416 stack_entry->end = addr; 3417 stack_entry->avail_ssize -= grow_amount; 3418 vm_map_entry_resize_free(map, stack_entry); 3419 rv = KERN_SUCCESS; 3420 3421 if (next_entry != &map->header) 3422 vm_map_clip_start(map, next_entry, addr); 3423 } else 3424 rv = KERN_FAILURE; 3425 } 3426 3427 if (rv == KERN_SUCCESS && is_procstack) 3428 vm->vm_ssize += btoc(grow_amount); 3429 3430 vm_map_unlock(map); 3431 3432 /* 3433 * Heed the MAP_WIREFUTURE flag if it was set for this process. 3434 */ 3435 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 3436 vm_map_wire(map, 3437 (stack_entry == next_entry) ? addr : addr - grow_amount, 3438 (stack_entry == next_entry) ? stack_entry->start : addr, 3439 (p->p_flag & P_SYSTEM) 3440 ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 3441 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 3442 } 3443 3444 return (rv); 3445 } 3446 3447 /* 3448 * Unshare the specified VM space for exec. If other processes are 3449 * sharing it, then create a new one. The new vmspace starts out empty. 3450 */ 3451 int 3452 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 3453 { 3454 struct vmspace *oldvmspace = p->p_vmspace; 3455 struct vmspace *newvmspace; 3456 3457 newvmspace = vmspace_alloc(minuser, maxuser); 3458 if (newvmspace == NULL) 3459 return (ENOMEM); 3460 newvmspace->vm_swrss = oldvmspace->vm_swrss; 3461 /* 3462 * This code is written like this for prototype purposes. The 3463 * goal is to avoid running down the vmspace here, but let the 3464 * other processes that are still using the vmspace finally 3465 * run it down. Even though there is little or no chance of blocking 3466 * here, it is a good idea to keep this form for future mods. 3467 */ 3468 PROC_VMSPACE_LOCK(p); 3469 p->p_vmspace = newvmspace; 3470 PROC_VMSPACE_UNLOCK(p); 3471 if (p == curthread->td_proc) 3472 pmap_activate(curthread); 3473 vmspace_free(oldvmspace); 3474 return (0); 3475 } 3476 3477 /* 3478 * Unshare the specified VM space for forcing COW.
This 3479 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 3480 */ 3481 int 3482 vmspace_unshare(struct proc *p) 3483 { 3484 struct vmspace *oldvmspace = p->p_vmspace; 3485 struct vmspace *newvmspace; 3486 vm_ooffset_t fork_charge; 3487 3488 if (oldvmspace->vm_refcnt == 1) 3489 return (0); 3490 fork_charge = 0; 3491 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 3492 if (newvmspace == NULL) 3493 return (ENOMEM); 3494 if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) { 3495 vmspace_free(newvmspace); 3496 return (ENOMEM); 3497 } 3498 PROC_VMSPACE_LOCK(p); 3499 p->p_vmspace = newvmspace; 3500 PROC_VMSPACE_UNLOCK(p); 3501 if (p == curthread->td_proc) 3502 pmap_activate(curthread); 3503 vmspace_free(oldvmspace); 3504 return (0); 3505 } 3506 3507 /* 3508 * vm_map_lookup: 3509 * 3510 * Finds the VM object, offset, and 3511 * protection for a given virtual address in the 3512 * specified map, assuming a page fault of the 3513 * type specified. 3514 * 3515 * Leaves the map in question locked for read; return 3516 * values are guaranteed until a vm_map_lookup_done 3517 * call is performed. Note that the map argument 3518 * is in/out; the returned map must be used in 3519 * the call to vm_map_lookup_done. 3520 * 3521 * A handle (out_entry) is returned for use in 3522 * vm_map_lookup_done, to make that fast. 3523 * 3524 * If a lookup is requested with "write protection" 3525 * specified, the map may be changed to perform virtual 3526 * copying operations, although the data referenced will 3527 * remain the same. 3528 */ 3529 int 3530 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 3531 vm_offset_t vaddr, 3532 vm_prot_t fault_typea, 3533 vm_map_entry_t *out_entry, /* OUT */ 3534 vm_object_t *object, /* OUT */ 3535 vm_pindex_t *pindex, /* OUT */ 3536 vm_prot_t *out_prot, /* OUT */ 3537 boolean_t *wired) /* OUT */ 3538 { 3539 vm_map_entry_t entry; 3540 vm_map_t map = *var_map; 3541 vm_prot_t prot; 3542 vm_prot_t fault_type = fault_typea; 3543 vm_object_t eobject; 3544 struct uidinfo *uip; 3545 vm_ooffset_t size; 3546 3547 RetryLookup:; 3548 3549 vm_map_lock_read(map); 3550 3551 /* 3552 * Lookup the faulting address. 3553 */ 3554 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 3555 vm_map_unlock_read(map); 3556 return (KERN_INVALID_ADDRESS); 3557 } 3558 3559 entry = *out_entry; 3560 3561 /* 3562 * Handle submaps. 3563 */ 3564 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3565 vm_map_t old_map = map; 3566 3567 *var_map = map = entry->object.sub_map; 3568 vm_map_unlock_read(old_map); 3569 goto RetryLookup; 3570 } 3571 3572 /* 3573 * Check whether this task is allowed to have this page. 3574 */ 3575 prot = entry->protection; 3576 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 3577 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 3578 vm_map_unlock_read(map); 3579 return (KERN_PROTECTION_FAILURE); 3580 } 3581 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3582 (entry->eflags & MAP_ENTRY_COW) && 3583 (fault_type & VM_PROT_WRITE)) { 3584 vm_map_unlock_read(map); 3585 return (KERN_PROTECTION_FAILURE); 3586 } 3587 3588 /* 3589 * If this page is not pageable, we have to get it for all possible 3590 * accesses. 3591 */ 3592 *wired = (entry->wired_count != 0); 3593 if (*wired) 3594 fault_type = entry->protection; 3595 size = entry->end - entry->start; 3596 /* 3597 * If the entry was copy-on-write, we either ... 
3598 */ 3599 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3600 /* 3601 * If we want to write the page, we may as well handle that 3602 * now since we've got the map locked. 3603 * 3604 * If we don't need to write the page, we just demote the 3605 * permissions allowed. 3606 */ 3607 if ((fault_type & VM_PROT_WRITE) != 0 || 3608 (fault_typea & VM_PROT_COPY) != 0) { 3609 /* 3610 * Make a new object, and place it in the object 3611 * chain. Note that no new references have appeared 3612 * -- one just moved from the map to the new 3613 * object. 3614 */ 3615 if (vm_map_lock_upgrade(map)) 3616 goto RetryLookup; 3617 3618 if (entry->uip == NULL) { 3619 /* 3620 * The debugger owner is charged for 3621 * the memory. 3622 */ 3623 uip = curthread->td_ucred->cr_ruidinfo; 3624 uihold(uip); 3625 if (!swap_reserve_by_uid(size, uip)) { 3626 uifree(uip); 3627 vm_map_unlock(map); 3628 return (KERN_RESOURCE_SHORTAGE); 3629 } 3630 entry->uip = uip; 3631 } 3632 vm_object_shadow( 3633 &entry->object.vm_object, 3634 &entry->offset, 3635 atop(size)); 3636 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3637 eobject = entry->object.vm_object; 3638 if (eobject->uip != NULL) { 3639 /* 3640 * The object was not shadowed. 3641 */ 3642 swap_release_by_uid(size, entry->uip); 3643 uifree(entry->uip); 3644 entry->uip = NULL; 3645 } else if (entry->uip != NULL) { 3646 VM_OBJECT_LOCK(eobject); 3647 eobject->uip = entry->uip; 3648 eobject->charge = size; 3649 VM_OBJECT_UNLOCK(eobject); 3650 entry->uip = NULL; 3651 } 3652 3653 vm_map_lock_downgrade(map); 3654 } else { 3655 /* 3656 * We're attempting to read a copy-on-write page -- 3657 * don't allow writes. 3658 */ 3659 prot &= ~VM_PROT_WRITE; 3660 } 3661 } 3662 3663 /* 3664 * Create an object if necessary. 3665 */ 3666 if (entry->object.vm_object == NULL && 3667 !map->system_map) { 3668 if (vm_map_lock_upgrade(map)) 3669 goto RetryLookup; 3670 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 3671 atop(size)); 3672 entry->offset = 0; 3673 if (entry->uip != NULL) { 3674 VM_OBJECT_LOCK(entry->object.vm_object); 3675 entry->object.vm_object->uip = entry->uip; 3676 entry->object.vm_object->charge = size; 3677 VM_OBJECT_UNLOCK(entry->object.vm_object); 3678 entry->uip = NULL; 3679 } 3680 vm_map_lock_downgrade(map); 3681 } 3682 3683 /* 3684 * Return the object/offset from this entry. If the entry was 3685 * copy-on-write or empty, it has been fixed up. 3686 */ 3687 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3688 *object = entry->object.vm_object; 3689 3690 *out_prot = prot; 3691 return (KERN_SUCCESS); 3692 } 3693 3694 /* 3695 * vm_map_lookup_locked: 3696 * 3697 * Lookup the faulting address. A version of vm_map_lookup that returns 3698 * KERN_FAILURE instead of blocking on map lock or memory allocation. 3699 */ 3700 int 3701 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 3702 vm_offset_t vaddr, 3703 vm_prot_t fault_typea, 3704 vm_map_entry_t *out_entry, /* OUT */ 3705 vm_object_t *object, /* OUT */ 3706 vm_pindex_t *pindex, /* OUT */ 3707 vm_prot_t *out_prot, /* OUT */ 3708 boolean_t *wired) /* OUT */ 3709 { 3710 vm_map_entry_t entry; 3711 vm_map_t map = *var_map; 3712 vm_prot_t prot; 3713 vm_prot_t fault_type = fault_typea; 3714 3715 /* 3716 * Lookup the faulting address. 3717 */ 3718 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 3719 return (KERN_INVALID_ADDRESS); 3720 3721 entry = *out_entry; 3722 3723 /* 3724 * Fail if the entry refers to a submap. 
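* Unlike vm_map_lookup(), we cannot drop the map lock and retry * inside the submap, so the lookup simply fails.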
3725 */ 3726 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3727 return (KERN_FAILURE); 3728 3729 /* 3730 * Check whether this task is allowed to have this page. 3731 */ 3732 prot = entry->protection; 3733 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 3734 if ((fault_type & prot) != fault_type) 3735 return (KERN_PROTECTION_FAILURE); 3736 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3737 (entry->eflags & MAP_ENTRY_COW) && 3738 (fault_type & VM_PROT_WRITE)) 3739 return (KERN_PROTECTION_FAILURE); 3740 3741 /* 3742 * If this page is not pageable, we have to get it for all possible 3743 * accesses. 3744 */ 3745 *wired = (entry->wired_count != 0); 3746 if (*wired) 3747 fault_type = entry->protection; 3748 3749 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3750 /* 3751 * Fail if the entry was copy-on-write for a write fault. 3752 */ 3753 if (fault_type & VM_PROT_WRITE) 3754 return (KERN_FAILURE); 3755 /* 3756 * We're attempting to read a copy-on-write page -- 3757 * don't allow writes. 3758 */ 3759 prot &= ~VM_PROT_WRITE; 3760 } 3761 3762 /* 3763 * Fail if an object should be created. 3764 */ 3765 if (entry->object.vm_object == NULL && !map->system_map) 3766 return (KERN_FAILURE); 3767 3768 /* 3769 * Return the object/offset from this entry. If the entry was 3770 * copy-on-write or empty, it has been fixed up. 3771 */ 3772 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3773 *object = entry->object.vm_object; 3774 3775 *out_prot = prot; 3776 return (KERN_SUCCESS); 3777 } 3778 3779 /* 3780 * vm_map_lookup_done: 3781 * 3782 * Releases locks acquired by a vm_map_lookup 3783 * (according to the handle returned by that lookup). 3784 */ 3785 void 3786 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 3787 { 3788 /* 3789 * Unlock the main-level map 3790 */ 3791 vm_map_unlock_read(map); 3792 } 3793 3794 #include "opt_ddb.h" 3795 #ifdef DDB 3796 #include <sys/kernel.h> 3797 3798 #include <ddb/ddb.h> 3799 3800 /* 3801 * vm_map_print: [ debug ] 3802 */ 3803 DB_SHOW_COMMAND(map, vm_map_print) 3804 { 3805 static int nlines; 3806 /* XXX convert args. 
*/ 3807 vm_map_t map = (vm_map_t)addr; 3808 boolean_t full = have_addr; 3809 3810 vm_map_entry_t entry; 3811 3812 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3813 (void *)map, 3814 (void *)map->pmap, map->nentries, map->timestamp); 3815 nlines++; 3816 3817 if (!full && db_indent) 3818 return; 3819 3820 db_indent += 2; 3821 for (entry = map->header.next; entry != &map->header; 3822 entry = entry->next) { 3823 db_iprintf("map entry %p: start=%p, end=%p\n", 3824 (void *)entry, (void *)entry->start, (void *)entry->end); 3825 nlines++; 3826 { 3827 static char *inheritance_name[4] = 3828 {"share", "copy", "none", "donate_copy"}; 3829 3830 db_iprintf(" prot=%x/%x/%s", 3831 entry->protection, 3832 entry->max_protection, 3833 inheritance_name[(int)(unsigned char)entry->inheritance]); 3834 if (entry->wired_count != 0) 3835 db_printf(", wired"); 3836 } 3837 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3838 db_printf(", share=%p, offset=0x%jx\n", 3839 (void *)entry->object.sub_map, 3840 (uintmax_t)entry->offset); 3841 nlines++; 3842 if ((entry->prev == &map->header) || 3843 (entry->prev->object.sub_map != 3844 entry->object.sub_map)) { 3845 db_indent += 2; 3846 vm_map_print((db_expr_t)(intptr_t) 3847 entry->object.sub_map, 3848 full, 0, (char *)0); 3849 db_indent -= 2; 3850 } 3851 } else { 3852 if (entry->uip != NULL) 3853 db_printf(", uip %d", entry->uip->ui_uid); 3854 db_printf(", object=%p, offset=0x%jx", 3855 (void *)entry->object.vm_object, 3856 (uintmax_t)entry->offset); 3857 if (entry->object.vm_object && entry->object.vm_object->uip) 3858 db_printf(", obj uip %d charge %jx", 3859 entry->object.vm_object->uip->ui_uid, 3860 (uintmax_t)entry->object.vm_object->charge); 3861 if (entry->eflags & MAP_ENTRY_COW) 3862 db_printf(", copy (%s)", 3863 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3864 db_printf("\n"); 3865 nlines++; 3866 3867 if ((entry->prev == &map->header) || 3868 (entry->prev->object.vm_object != 3869 entry->object.vm_object)) { 3870 db_indent += 2; 3871 vm_object_print((db_expr_t)(intptr_t) 3872 entry->object.vm_object, 3873 full, 0, (char *)0); 3874 nlines += 4; 3875 db_indent -= 2; 3876 } 3877 } 3878 } 3879 db_indent -= 2; 3880 if (db_indent == 0) 3881 nlines = 0; 3882 } 3883 3884 3885 DB_SHOW_COMMAND(procvm, procvm) 3886 { 3887 struct proc *p; 3888 3889 if (have_addr) { 3890 p = (struct proc *) addr; 3891 } else { 3892 p = curproc; 3893 } 3894 3895 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3896 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3897 (void *)vmspace_pmap(p->p_vmspace)); 3898 3899 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3900 } 3901 3902 #endif /* DDB */ 3903
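/* * Usage sketch (illustrative only; not part of the original source). * A typical kernel consumer unmaps a user address range through the * exported wrapper rather than calling vm_map_delete() directly: * * vm_map_t map; * * map = &curproc->p_vmspace->vm_map; * (void) vm_map_remove(map, trunc_page(addr), * round_page(addr + size)); * * vm_map_remove() acquires the map lock, clamps the range with * VM_MAP_RANGE_CHECK(), and then calls vm_map_delete(). */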