/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
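
/*
 * Illustrative note (not from the original source): ENTRY_CHARGED()
 * is true when swap accounting has already been charged for an entry,
 * either directly (e->cred != NULL) or through the backing object
 * (object->cred != NULL) in the case where the entry maps the object
 * itself rather than a still-pending copy-on-write shadow, i.e. when
 * MAP_ENTRY_NEEDS_COPY is clear.
 */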
/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}
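
/*
 * Illustrative caller pattern (a sketch, not taken from this file):
 * consumers that need to inspect another process's address space,
 * e.g. debuggers or procfs-like code, typically bracket their access
 * like this:
 *
 *	vm = vmspace_acquire_ref(p);
 *	if (vm == NULL)
 *		return (ESRCH);
 *	map = &vm->vm_map;
 *	... look up entries or fault pages in "map" ...
 *	vmspace_free(vm);
 */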
/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The AIO kernel processes switch to and from a user process's
 * vmspace while performing an I/O operation on behalf of a user
 * process.  The new vmspace is either the vmspace of a user process
 * obtained from an active AIO request or the initial vmspace of the
 * AIO kernel process (when it is idling).  Because user processes
 * will block to drain any active AIO requests before proceeding in
 * exit() or execve(), the vmspace reference count for these vmspaces
 * can never be 0.  This allows for a much simpler implementation than
 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
 * processes hold an extra reference on their initial vmspace for the
 * life of the process so that this guarantee is true for any vmspace
 * passed as 'newvm'.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	/* Remove the daemon's reference to the old address space. */
	KASSERT(oldvm->vm_refcnt > 1,
	    ("vmspace_switch_aio: oldvm dropping last reference"));
	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}
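
/*
 * Illustrative pattern (a sketch, not part of the original file): the
 * timestamp bumped by the write-lock routines above lets a caller
 * detect whether the map changed while it was unlocked, e.g. while
 * sleeping for a page fault:
 *
 *	last_timestamp = map->timestamp;
 *	vm_map_unlock(map);
 *	... sleep, fault, or otherwise block ...
 *	vm_map_lock(map);
 *	if (last_timestamp != map->timestamp) {
 *		... re-lookup the entry; it may have been clipped,
 *		    coalesced, or deleted in the interim ...
 *	}
 */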
/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}
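
/*
 * Illustrative pairing (a sketch, not part of the original file): a
 * thread that finds an entry it must wait on typically marks it and
 * sleeps, while the thread that clears the condition calls
 * vm_map_wakeup():
 *
 *	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *	(void)vm_map_unlock_and_wait(map, 0);
 *	vm_map_lock(map);
 *	... re-validate; deferred frees run at the eventual unlock ...
 */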
/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}
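
/*
 * Illustrative note (not from the original source): the behavior
 * values correspond to madvise(2) requests, so MADV_SEQUENTIAL on a
 * range would be applied roughly as:
 *
 *	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
 *
 * which the fault handler later consults when sizing its read-ahead.
 */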
/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
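
/*
 * Worked example (illustrative, not from the original source): with
 * entries A [0x1000, 0x2000) and B [0x5000, 0x6000) adjacent in the
 * address ordering, A->adj_free is 0x3000 (the gap up to B's start),
 * and max_free at any node is the largest adj_free anywhere in its
 * subtree.  A first-fit search for 0x4000 bytes can therefore skip
 * A's entire subtree after a single comparison against its max_free.
 */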
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where == &map->header ||
	    after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(after_where->next == &map->header ||
	    entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
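
/*
 * Illustrative use (a sketch, not taken from this file):
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... "addr" lies inside "entry" ...
 *	} else {
 *		... "entry" precedes "addr"; the gap, if any, runs
 *		    from entry->end to entry->next->start ...
 *	}
 */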
/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((object != kmem_object && object != kernel_object) ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kmem or kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start < map->min_offset || end > map->max_offset || start >= end)
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (prev_entry->next != &map->header && prev_entry->next->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	} else if (prev_entry != &map->header &&
	    prev_entry->eflags == protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
	    prev_entry->end == start && prev_entry->wired_count == 0 &&
	    (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max) {
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_simplify_entry(map, new_entry);

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}
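
/*
 * Illustrative note (an assumption based on the VMFS_* conventions in
 * vm_map.h, not stated in this file): "find_space" values above 0xff
 * encode a log2 alignment in their upper bits, so a caller wanting a
 * 64KB-aligned region would pass something like VMFS_ALIGNED_SPACE(16),
 * which vm_map_find() below decodes as alignment = 1 << 16.
 */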
/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, vm_offset_t max_addr, int find_space,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, initial_addr, start;
	int result;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	initial_addr = *addr;
again:
	start = initial_addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				vm_map_unlock(map);
				if (find_space == VMFS_OPTIMAL_SPACE) {
					find_space = VMFS_ANY_SPACE;
					goto again;
				}
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_SUPER_SPACE:
			case VMFS_OPTIMAL_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
			case VMFS_ANY_SPACE:
				break;
			default:
				if ((*addr & (alignment - 1)) != 0) {
					*addr &= ~(alignment - 1);
					*addr += alignment;
				}
				break;
			}

			start = *addr;
		}
		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
			result = vm_map_stack_locked(map, start, length,
			    sgrowsiz, prot, max, cow);
		} else {
			result = vm_map_insert(map, object, offset, start,
			    start + length, prot, max, cow);
		}
	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
	    find_space != VMFS_ANY_SPACE);
	vm_map_unlock(map);
	return (result);
}

int
vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
    vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
    int cow)
{
	vm_offset_t hint;
	int rv;

	hint = *addr;
	for (;;) {
		rv = vm_map_find(map, object, offset, addr, length, max_addr,
		    find_space, prot, max, cow);
		if (rv == KERN_SUCCESS || min_addr >= hint)
			return (rv);
		*addr = hint = min_addr;
	}
}
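
/*
 * Illustrative note (not from the original source): vm_map_find_min()
 * makes at most two passes.  The first pass searches upward from the
 * caller's hint; if that fails and the hint was above min_addr, the
 * hint is reset to min_addr and the search repeats once.  Because
 * "hint" is updated together with *addr, the termination test
 * "min_addr >= hint" is true on the retry, so the loop cannot run a
 * third time.
 */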
/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ((prev->end == entry->start) &&
		    (prev->object.vm_object == entry->object.vm_object) &&
		    (!prev->object.vm_object ||
		    (prev->offset + prevsize == entry->offset)) &&
		    (prev->eflags == entry->eflags) &&
		    (prev->protection == entry->protection) &&
		    (prev->max_protection == entry->max_protection) &&
		    (prev->inheritance == entry->inheritance) &&
		    (prev->wired_count == entry->wired_count) &&
		    (prev->cred == entry->cred)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 *
			 * Since we count the number of virtual page
			 * mappings in object->un_pager.vnp.writemappings,
			 * the writemappings value should not be adjusted
			 * when the entry is disposed of.
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->cred != NULL)
				crfree(prev->cred);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!entry->object.vm_object ||
		    (entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->cred == entry->cred)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->cred != NULL)
				crfree(next->cred);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}
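
/*
 * Worked example (illustrative, not from the original source):
 * clipping an entry covering [0x1000, 0x4000) at startaddr 0x2000
 * splits it into [0x1000, 0x2000) and [0x2000, 0x4000).  The second
 * piece keeps the passed-in entry pointer, so the caller can then
 * operate on exactly [startaddr, ...).  If startaddr is already at
 * or below entry->start, the macro does nothing.
 */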
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->end > start && entry->start < start,
	    ("_vm_map_clip_start: invalid clip of entry %p", entry));

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map &&
	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->start < end && entry->end > end,
	    ("_vm_map_clip_end: invalid clip of entry %p", entry));

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map &&
	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
 */
#define	MAX_INIT_PT	96
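
/*
 * Illustrative use of vm_map_submap() (a sketch, not taken from this
 * file): a kernel subsystem that manages its own piece of the kernel
 * address space would reserve a range in kernel_map, create a
 * subordinate map over the same bounds, and install it:
 *
 *	submap = vm_map_create(vm_map_pmap(kernel_map), base,
 *	    base + size);
 *	(void)vm_map_submap(kernel_map, base, base + size, submap);
 *
 * after which faults in [base, base + size) are redirected to submap.
 */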
(For this purpose, a superpage mapping 1896 * counts as one page mapping.) Otherwise, all resident pages within 1897 * the specified address range are mapped. 1898 */ 1899 static void 1900 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1901 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1902 { 1903 vm_offset_t start; 1904 vm_page_t p, p_start; 1905 vm_pindex_t mask, psize, threshold, tmpidx; 1906 1907 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1908 return; 1909 VM_OBJECT_RLOCK(object); 1910 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1911 VM_OBJECT_RUNLOCK(object); 1912 VM_OBJECT_WLOCK(object); 1913 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1914 pmap_object_init_pt(map->pmap, addr, object, pindex, 1915 size); 1916 VM_OBJECT_WUNLOCK(object); 1917 return; 1918 } 1919 VM_OBJECT_LOCK_DOWNGRADE(object); 1920 } 1921 1922 psize = atop(size); 1923 if (psize + pindex > object->size) { 1924 if (object->size < pindex) { 1925 VM_OBJECT_RUNLOCK(object); 1926 return; 1927 } 1928 psize = object->size - pindex; 1929 } 1930 1931 start = 0; 1932 p_start = NULL; 1933 threshold = MAX_INIT_PT; 1934 1935 p = vm_page_find_least(object, pindex); 1936 /* 1937 * Assert: the variable p is either (1) the page with the 1938 * least pindex greater than or equal to the parameter pindex 1939 * or (2) NULL. 1940 */ 1941 for (; 1942 p != NULL && (tmpidx = p->pindex - pindex) < psize; 1943 p = TAILQ_NEXT(p, listq)) { 1944 /* 1945 * Don't let madvise consume our truly free pages 1946 * by allocating pv entries for them. 1947 */ 1948 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 1949 vm_cnt.v_free_count < vm_cnt.v_free_reserved) || 1950 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 1951 tmpidx >= threshold)) { 1952 psize = tmpidx; 1953 break; 1954 } 1955 if (p->valid == VM_PAGE_BITS_ALL) { 1956 if (p_start == NULL) { 1957 start = addr + ptoa(tmpidx); 1958 p_start = p; 1959 } 1960 /* Jump ahead if a superpage mapping is possible. */ 1961 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 1962 (pagesizes[p->psind] - 1)) == 0) { 1963 mask = atop(pagesizes[p->psind]) - 1; 1964 if (tmpidx + mask < psize && 1965 vm_page_ps_is_valid(p)) { 1966 p += mask; 1967 threshold += mask; 1968 } 1969 } 1970 } else if (p_start != NULL) { 1971 pmap_enter_object(map->pmap, start, addr + 1972 ptoa(tmpidx), p_start, prot); 1973 p_start = NULL; 1974 } 1975 } 1976 if (p_start != NULL) 1977 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 1978 p_start, prot); 1979 VM_OBJECT_RUNLOCK(object); 1980 } 1981 1982 /* 1983 * vm_map_protect: 1984 * 1985 * Sets the protection of the specified address 1986 * region in the target map. If "set_max" is 1987 * specified, the maximum protection is to be set; 1988 * otherwise, only the current protection is affected. 1989 */ 1990 int 1991 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1992 vm_prot_t new_prot, boolean_t set_max) 1993 { 1994 vm_map_entry_t current, entry; 1995 vm_object_t obj; 1996 struct ucred *cred; 1997 vm_prot_t old_prot; 1998 1999 if (start == end) 2000 return (KERN_SUCCESS); 2001 2002 vm_map_lock(map); 2003 2004 /* 2005 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2006 * need to fault pages into the map and will drop the map lock while 2007 * doing so, and the VM object may end up in an inconsistent state if we 2008 * update the protection on the map entry in between faults. 
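 *
 * An illustrative interleaving this wait prevents (editor's sketch,
 * not from the original source):
 *
 *	thread A: vm_map_wire() drops the map lock to fault in pages
 *	thread B: vm_map_protect() changes the entry's protection
 *	thread A: retakes the lock and wires the remaining pages under
 *	    the old, stale protection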
2009 */ 2010 vm_map_wait_busy(map); 2011 2012 VM_MAP_RANGE_CHECK(map, start, end); 2013 2014 if (vm_map_lookup_entry(map, start, &entry)) { 2015 vm_map_clip_start(map, entry, start); 2016 } else { 2017 entry = entry->next; 2018 } 2019 2020 /* 2021 * Make a first pass to check for protection violations. 2022 */ 2023 for (current = entry; current != &map->header && current->start < end; 2024 current = current->next) { 2025 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2026 continue; 2027 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2028 vm_map_unlock(map); 2029 return (KERN_INVALID_ARGUMENT); 2030 } 2031 if ((new_prot & current->max_protection) != new_prot) { 2032 vm_map_unlock(map); 2033 return (KERN_PROTECTION_FAILURE); 2034 } 2035 } 2036 2037 /* 2038 * Do an accounting pass for private read-only mappings that 2039 * will now do copy-on-write due to newly allowed write access 2040 * (e.g., a debugger sets a breakpoint on the text segment). 2041 */ 2042 for (current = entry; current != &map->header && current->start < end; 2043 current = current->next) { 2044 2045 vm_map_clip_end(map, current, end); 2046 2047 if (set_max || 2048 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2049 ENTRY_CHARGED(current) || 2050 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2051 continue; 2052 } 2053 2054 cred = curthread->td_ucred; 2055 obj = current->object.vm_object; 2056 2057 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2058 if (!swap_reserve(current->end - current->start)) { 2059 vm_map_unlock(map); 2060 return (KERN_RESOURCE_SHORTAGE); 2061 } 2062 crhold(cred); 2063 current->cred = cred; 2064 continue; 2065 } 2066 2067 VM_OBJECT_WLOCK(obj); 2068 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2069 VM_OBJECT_WUNLOCK(obj); 2070 continue; 2071 } 2072 2073 /* 2074 * Charge for the whole object allocation now, since 2075 * we cannot distinguish between non-charged and 2076 * charged clipped mapping of the same object later. 2077 */ 2078 KASSERT(obj->charge == 0, 2079 ("vm_map_protect: object %p overcharged (entry %p)", 2080 obj, current)); 2081 if (!swap_reserve(ptoa(obj->size))) { 2082 VM_OBJECT_WUNLOCK(obj); 2083 vm_map_unlock(map); 2084 return (KERN_RESOURCE_SHORTAGE); 2085 } 2086 2087 crhold(cred); 2088 obj->cred = cred; 2089 obj->charge = ptoa(obj->size); 2090 VM_OBJECT_WUNLOCK(obj); 2091 } 2092 2093 /* 2094 * Go back and fix up protections. [Note that clipping is not 2095 * necessary the second time.] 2096 */ 2097 for (current = entry; current != &map->header && current->start < end; 2098 current = current->next) { 2099 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2100 continue; 2101 2102 old_prot = current->protection; 2103 2104 if (set_max) 2105 current->protection = 2106 (current->max_protection = new_prot) & 2107 old_prot; 2108 else 2109 current->protection = new_prot; 2110 2111 /* 2112 * For user wired map entries, the normal lazy evaluation of 2113 * write access upgrades through soft page faults is 2114 * undesirable. Instead, immediately copy any pages that are 2115 * copy-on-write and enable write access in the physical map. 2116 */ 2117 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2118 (current->protection & VM_PROT_WRITE) != 0 && 2119 (old_prot & VM_PROT_WRITE) == 0) 2120 vm_fault_copy_entry(map, map, current, current, NULL); 2121 2122 /* 2123 * When restricting access, update the physical map. Worry 2124 * about copy-on-write here. 2125 */ 2126 if ((old_prot & ~current->protection) != 0) { 2127 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 2128 VM_PROT_ALL) 2129 pmap_protect(map->pmap, current->start, 2130 current->end, 2131 current->protection & MASK(current)); 2132 #undef MASK 2133 } 2134 vm_map_simplify_entry(map, current); 2135 } 2136 vm_map_unlock(map); 2137 return (KERN_SUCCESS); 2138 } 2139 2140 /* 2141 * vm_map_madvise: 2142 * 2143 * This routine traverses a process's map handling the madvise 2144 * system call. Advisories are classified as either those affecting 2145 * the vm_map_entry structure, or those affecting the underlying 2146 * objects. 2147 */ 2148 int 2149 vm_map_madvise( 2150 vm_map_t map, 2151 vm_offset_t start, 2152 vm_offset_t end, 2153 int behav) 2154 { 2155 vm_map_entry_t current, entry; 2156 int modify_map = 0; 2157 2158 /* 2159 * Some madvise calls directly modify the vm_map_entry, in which case 2160 * we need to use an exclusive lock on the map and we need to perform 2161 * various clipping operations. Otherwise we only need a read-lock 2162 * on the map. 2163 */ 2164 switch(behav) { 2165 case MADV_NORMAL: 2166 case MADV_SEQUENTIAL: 2167 case MADV_RANDOM: 2168 case MADV_NOSYNC: 2169 case MADV_AUTOSYNC: 2170 case MADV_NOCORE: 2171 case MADV_CORE: 2172 if (start == end) 2173 return (KERN_SUCCESS); 2174 modify_map = 1; 2175 vm_map_lock(map); 2176 break; 2177 case MADV_WILLNEED: 2178 case MADV_DONTNEED: 2179 case MADV_FREE: 2180 if (start == end) 2181 return (KERN_SUCCESS); 2182 vm_map_lock_read(map); 2183 break; 2184 default: 2185 return (KERN_INVALID_ARGUMENT); 2186 } 2187 2188 /* 2189 * Locate starting entry and clip if necessary. 2190 */ 2191 VM_MAP_RANGE_CHECK(map, start, end); 2192 2193 if (vm_map_lookup_entry(map, start, &entry)) { 2194 if (modify_map) 2195 vm_map_clip_start(map, entry, start); 2196 } else { 2197 entry = entry->next; 2198 } 2199 2200 if (modify_map) { 2201 /* 2202 * madvise behaviors that are implemented in the vm_map_entry. 2203 * 2204 * We clip the vm_map_entry so that behavioral changes are 2205 * limited to the specified address range. 2206 */ 2207 for (current = entry; 2208 (current != &map->header) && (current->start < end); 2209 current = current->next 2210 ) { 2211 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2212 continue; 2213 2214 vm_map_clip_end(map, current, end); 2215 2216 switch (behav) { 2217 case MADV_NORMAL: 2218 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2219 break; 2220 case MADV_SEQUENTIAL: 2221 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2222 break; 2223 case MADV_RANDOM: 2224 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2225 break; 2226 case MADV_NOSYNC: 2227 current->eflags |= MAP_ENTRY_NOSYNC; 2228 break; 2229 case MADV_AUTOSYNC: 2230 current->eflags &= ~MAP_ENTRY_NOSYNC; 2231 break; 2232 case MADV_NOCORE: 2233 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2234 break; 2235 case MADV_CORE: 2236 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2237 break; 2238 default: 2239 break; 2240 } 2241 vm_map_simplify_entry(map, current); 2242 } 2243 vm_map_unlock(map); 2244 } else { 2245 vm_pindex_t pstart, pend; 2246 2247 /* 2248 * madvise behaviors that are implemented in the underlying 2249 * vm_object. 2250 * 2251 * Since we don't clip the vm_map_entry, we have to clip 2252 * the vm_object pindex and count. 
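 *
 * Worked example (editor's illustration, assuming 4KB pages): an
 * entry spanning [0x10000, 0x20000) with offset 0x2000 covers
 * object pages [2, 18).  For madvise bounds [0x14000, 0x18000),
 * the loop below advances pstart by atop(0x14000 - 0x10000) = 4
 * to 6 and trims pend by atop(0x20000 - 0x18000) = 8 to 10.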
2253 */ 2254 for (current = entry; 2255 (current != &map->header) && (current->start < end); 2256 current = current->next 2257 ) { 2258 vm_offset_t useEnd, useStart; 2259 2260 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2261 continue; 2262 2263 pstart = OFF_TO_IDX(current->offset); 2264 pend = pstart + atop(current->end - current->start); 2265 useStart = current->start; 2266 useEnd = current->end; 2267 2268 if (current->start < start) { 2269 pstart += atop(start - current->start); 2270 useStart = start; 2271 } 2272 if (current->end > end) { 2273 pend -= atop(current->end - end); 2274 useEnd = end; 2275 } 2276 2277 if (pstart >= pend) 2278 continue; 2279 2280 /* 2281 * Perform the pmap_advise() before clearing 2282 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2283 * concurrent pmap operation, such as pmap_remove(), 2284 * could clear a reference in the pmap and set 2285 * PGA_REFERENCED on the page before the pmap_advise() 2286 * had completed. Consequently, the page would appear 2287 * referenced based upon an old reference that 2288 * occurred before this pmap_advise() ran. 2289 */ 2290 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2291 pmap_advise(map->pmap, useStart, useEnd, 2292 behav); 2293 2294 vm_object_madvise(current->object.vm_object, pstart, 2295 pend, behav); 2296 2297 /* 2298 * Pre-populate paging structures in the 2299 * WILLNEED case. For wired entries, the 2300 * paging structures are already populated. 2301 */ 2302 if (behav == MADV_WILLNEED && 2303 current->wired_count == 0) { 2304 vm_map_pmap_enter(map, 2305 useStart, 2306 current->protection, 2307 current->object.vm_object, 2308 pstart, 2309 ptoa(pend - pstart), 2310 MAP_PREFAULT_MADVISE 2311 ); 2312 } 2313 } 2314 vm_map_unlock_read(map); 2315 } 2316 return (0); 2317 } 2318 2319 2320 /* 2321 * vm_map_inherit: 2322 * 2323 * Sets the inheritance of the specified address 2324 * range in the target map. Inheritance 2325 * affects how the map will be shared with 2326 * child maps at the time of vmspace_fork. 2327 */ 2328 int 2329 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2330 vm_inherit_t new_inheritance) 2331 { 2332 vm_map_entry_t entry; 2333 vm_map_entry_t temp_entry; 2334 2335 switch (new_inheritance) { 2336 case VM_INHERIT_NONE: 2337 case VM_INHERIT_COPY: 2338 case VM_INHERIT_SHARE: 2339 case VM_INHERIT_ZERO: 2340 break; 2341 default: 2342 return (KERN_INVALID_ARGUMENT); 2343 } 2344 if (start == end) 2345 return (KERN_SUCCESS); 2346 vm_map_lock(map); 2347 VM_MAP_RANGE_CHECK(map, start, end); 2348 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2349 entry = temp_entry; 2350 vm_map_clip_start(map, entry, start); 2351 } else 2352 entry = temp_entry->next; 2353 while ((entry != &map->header) && (entry->start < end)) { 2354 vm_map_clip_end(map, entry, end); 2355 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2356 new_inheritance != VM_INHERIT_ZERO) 2357 entry->inheritance = new_inheritance; 2358 vm_map_simplify_entry(map, entry); 2359 entry = entry->next; 2360 } 2361 vm_map_unlock(map); 2362 return (KERN_SUCCESS); 2363 } 2364 2365 /* 2366 * vm_map_unwire: 2367 * 2368 * Implements both kernel and user unwiring. 
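 *
 * Typical use (editor's sketch; it mirrors how munlock(2)-style
 * callers invoke this function and is not part of the original
 * comment):
 *
 *	rv = vm_map_unwire(map, trunc_page(addr),
 *	    round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);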
2369 */ 2370 int 2371 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2372 int flags) 2373 { 2374 vm_map_entry_t entry, first_entry, tmp_entry; 2375 vm_offset_t saved_start; 2376 unsigned int last_timestamp; 2377 int rv; 2378 boolean_t need_wakeup, result, user_unwire; 2379 2380 if (start == end) 2381 return (KERN_SUCCESS); 2382 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2383 vm_map_lock(map); 2384 VM_MAP_RANGE_CHECK(map, start, end); 2385 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2386 if (flags & VM_MAP_WIRE_HOLESOK) 2387 first_entry = first_entry->next; 2388 else { 2389 vm_map_unlock(map); 2390 return (KERN_INVALID_ADDRESS); 2391 } 2392 } 2393 last_timestamp = map->timestamp; 2394 entry = first_entry; 2395 while (entry != &map->header && entry->start < end) { 2396 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2397 /* 2398 * We have not yet clipped the entry. 2399 */ 2400 saved_start = (start >= entry->start) ? start : 2401 entry->start; 2402 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2403 if (vm_map_unlock_and_wait(map, 0)) { 2404 /* 2405 * Allow interruption of user unwiring? 2406 */ 2407 } 2408 vm_map_lock(map); 2409 if (last_timestamp+1 != map->timestamp) { 2410 /* 2411 * Look again for the entry because the map was 2412 * modified while it was unlocked. 2413 * Specifically, the entry may have been 2414 * clipped, merged, or deleted. 2415 */ 2416 if (!vm_map_lookup_entry(map, saved_start, 2417 &tmp_entry)) { 2418 if (flags & VM_MAP_WIRE_HOLESOK) 2419 tmp_entry = tmp_entry->next; 2420 else { 2421 if (saved_start == start) { 2422 /* 2423 * First_entry has been deleted. 2424 */ 2425 vm_map_unlock(map); 2426 return (KERN_INVALID_ADDRESS); 2427 } 2428 end = saved_start; 2429 rv = KERN_INVALID_ADDRESS; 2430 goto done; 2431 } 2432 } 2433 if (entry == first_entry) 2434 first_entry = tmp_entry; 2435 else 2436 first_entry = NULL; 2437 entry = tmp_entry; 2438 } 2439 last_timestamp = map->timestamp; 2440 continue; 2441 } 2442 vm_map_clip_start(map, entry, start); 2443 vm_map_clip_end(map, entry, end); 2444 /* 2445 * Mark the entry in case the map lock is released. (See 2446 * above.) 2447 */ 2448 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2449 entry->wiring_thread == NULL, 2450 ("owned map entry %p", entry)); 2451 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2452 entry->wiring_thread = curthread; 2453 /* 2454 * Check the map for holes in the specified region. 2455 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2456 */ 2457 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2458 (entry->end < end && (entry->next == &map->header || 2459 entry->next->start > entry->end))) { 2460 end = entry->end; 2461 rv = KERN_INVALID_ADDRESS; 2462 goto done; 2463 } 2464 /* 2465 * If system unwiring, require that the entry is system wired. 
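 *
 * (Editor's note: wired_count accumulates the system wirings plus
 * at most one user wiring, so vm_map_entry_system_wired_count()
 * is wired_count less the MAP_ENTRY_USER_WIRED contribution.)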
2466 */ 2467 if (!user_wire && 2468 vm_map_entry_system_wired_count(entry) == 0) { 2469 end = entry->end; 2470 rv = KERN_INVALID_ARGUMENT; 2471 goto done; 2472 } 2473 entry = entry->next; 2474 } 2475 rv = KERN_SUCCESS; 2476 done: 2477 need_wakeup = FALSE; 2478 if (first_entry == NULL) { 2479 result = vm_map_lookup_entry(map, start, &first_entry); 2480 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2481 first_entry = first_entry->next; 2482 else 2483 KASSERT(result, ("vm_map_unwire: lookup failed")); 2484 } 2485 for (entry = first_entry; entry != &map->header && entry->start < end; 2486 entry = entry->next) { 2487 /* 2488 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2489 * space in the unwired region could have been mapped 2490 * while the map lock was dropped for draining 2491 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2492 * could be simultaneously wiring this new mapping 2493 * entry. Detect these cases and skip any entries 2494 * not marked as in transition by us. 2495 */ 2496 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2497 entry->wiring_thread != curthread) { 2498 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2499 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2500 continue; 2501 } 2502 2503 if (rv == KERN_SUCCESS && (!user_unwire || 2504 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2505 if (user_unwire) 2506 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2507 if (entry->wired_count == 1) 2508 vm_map_entry_unwire(map, entry); 2509 else 2510 entry->wired_count--; 2511 } 2512 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2513 ("vm_map_unwire: in-transition flag missing %p", entry)); 2514 KASSERT(entry->wiring_thread == curthread, 2515 ("vm_map_unwire: alien wire %p", entry)); 2516 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2517 entry->wiring_thread = NULL; 2518 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2519 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2520 need_wakeup = TRUE; 2521 } 2522 vm_map_simplify_entry(map, entry); 2523 } 2524 vm_map_unlock(map); 2525 if (need_wakeup) 2526 vm_map_wakeup(map); 2527 return (rv); 2528 } 2529 2530 /* 2531 * vm_map_wire_entry_failure: 2532 * 2533 * Handle a wiring failure on the given entry. 2534 * 2535 * The map should be locked. 2536 */ 2537 static void 2538 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2539 vm_offset_t failed_addr) 2540 { 2541 2542 VM_MAP_ASSERT_LOCKED(map); 2543 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2544 entry->wired_count == 1, 2545 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2546 KASSERT(failed_addr < entry->end, 2547 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2548 2549 /* 2550 * If any pages at the start of this entry were successfully wired, 2551 * then unwire them. 2552 */ 2553 if (failed_addr > entry->start) { 2554 pmap_unwire(map->pmap, entry->start, failed_addr); 2555 vm_object_unwire(entry->object.vm_object, entry->offset, 2556 failed_addr - entry->start, PQ_ACTIVE); 2557 } 2558 2559 /* 2560 * Assign an out-of-range value to represent the failure to wire this 2561 * entry. 2562 */ 2563 entry->wired_count = -1; 2564 } 2565 2566 /* 2567 * vm_map_wire: 2568 * 2569 * Implements both kernel and user wiring. 
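 *
 * Typical use (editor's sketch; it mirrors how mlock(2)-style
 * callers invoke this function and is not part of the original
 * comment):
 *
 *	rv = vm_map_wire(map, trunc_page(addr),
 *	    round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);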
2570 */ 2571 int 2572 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2573 int flags) 2574 { 2575 vm_map_entry_t entry, first_entry, tmp_entry; 2576 vm_offset_t faddr, saved_end, saved_start; 2577 unsigned int last_timestamp; 2578 int rv; 2579 boolean_t need_wakeup, result, user_wire; 2580 vm_prot_t prot; 2581 2582 if (start == end) 2583 return (KERN_SUCCESS); 2584 prot = 0; 2585 if (flags & VM_MAP_WIRE_WRITE) 2586 prot |= VM_PROT_WRITE; 2587 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2588 vm_map_lock(map); 2589 VM_MAP_RANGE_CHECK(map, start, end); 2590 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2591 if (flags & VM_MAP_WIRE_HOLESOK) 2592 first_entry = first_entry->next; 2593 else { 2594 vm_map_unlock(map); 2595 return (KERN_INVALID_ADDRESS); 2596 } 2597 } 2598 last_timestamp = map->timestamp; 2599 entry = first_entry; 2600 while (entry != &map->header && entry->start < end) { 2601 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2602 /* 2603 * We have not yet clipped the entry. 2604 */ 2605 saved_start = (start >= entry->start) ? start : 2606 entry->start; 2607 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2608 if (vm_map_unlock_and_wait(map, 0)) { 2609 /* 2610 * Allow interruption of user wiring? 2611 */ 2612 } 2613 vm_map_lock(map); 2614 if (last_timestamp + 1 != map->timestamp) { 2615 /* 2616 * Look again for the entry because the map was 2617 * modified while it was unlocked. 2618 * Specifically, the entry may have been 2619 * clipped, merged, or deleted. 2620 */ 2621 if (!vm_map_lookup_entry(map, saved_start, 2622 &tmp_entry)) { 2623 if (flags & VM_MAP_WIRE_HOLESOK) 2624 tmp_entry = tmp_entry->next; 2625 else { 2626 if (saved_start == start) { 2627 /* 2628 * first_entry has been deleted. 2629 */ 2630 vm_map_unlock(map); 2631 return (KERN_INVALID_ADDRESS); 2632 } 2633 end = saved_start; 2634 rv = KERN_INVALID_ADDRESS; 2635 goto done; 2636 } 2637 } 2638 if (entry == first_entry) 2639 first_entry = tmp_entry; 2640 else 2641 first_entry = NULL; 2642 entry = tmp_entry; 2643 } 2644 last_timestamp = map->timestamp; 2645 continue; 2646 } 2647 vm_map_clip_start(map, entry, start); 2648 vm_map_clip_end(map, entry, end); 2649 /* 2650 * Mark the entry in case the map lock is released. (See 2651 * above.) 2652 */ 2653 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2654 entry->wiring_thread == NULL, 2655 ("owned map entry %p", entry)); 2656 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2657 entry->wiring_thread = curthread; 2658 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2659 || (entry->protection & prot) != prot) { 2660 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2661 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2662 end = entry->end; 2663 rv = KERN_INVALID_ADDRESS; 2664 goto done; 2665 } 2666 goto next_entry; 2667 } 2668 if (entry->wired_count == 0) { 2669 entry->wired_count++; 2670 saved_start = entry->start; 2671 saved_end = entry->end; 2672 2673 /* 2674 * Release the map lock, relying on the in-transition 2675 * mark. Mark the map busy for fork. 2676 */ 2677 vm_map_busy(map); 2678 vm_map_unlock(map); 2679 2680 faddr = saved_start; 2681 do { 2682 /* 2683 * Simulate a fault to get the page and enter 2684 * it into the physical map. 
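 *
 * (Editor's note: VM_PROT_NONE defers to the entry's current
 * protection, which vm_map_lookup() substitutes for wired
 * entries; VM_FAULT_WIRE then makes vm_fault() wire the page it
 * resolves, one page per loop iteration.)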
2685 */ 2686 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 2687 VM_FAULT_WIRE)) != KERN_SUCCESS) 2688 break; 2689 } while ((faddr += PAGE_SIZE) < saved_end); 2690 vm_map_lock(map); 2691 vm_map_unbusy(map); 2692 if (last_timestamp + 1 != map->timestamp) { 2693 /* 2694 * Look again for the entry because the map was 2695 * modified while it was unlocked. The entry 2696 * may have been clipped, but NOT merged or 2697 * deleted. 2698 */ 2699 result = vm_map_lookup_entry(map, saved_start, 2700 &tmp_entry); 2701 KASSERT(result, ("vm_map_wire: lookup failed")); 2702 if (entry == first_entry) 2703 first_entry = tmp_entry; 2704 else 2705 first_entry = NULL; 2706 entry = tmp_entry; 2707 while (entry->end < saved_end) { 2708 /* 2709 * In case of failure, handle entries 2710 * that were not fully wired here; 2711 * fully wired entries are handled 2712 * later. 2713 */ 2714 if (rv != KERN_SUCCESS && 2715 faddr < entry->end) 2716 vm_map_wire_entry_failure(map, 2717 entry, faddr); 2718 entry = entry->next; 2719 } 2720 } 2721 last_timestamp = map->timestamp; 2722 if (rv != KERN_SUCCESS) { 2723 vm_map_wire_entry_failure(map, entry, faddr); 2724 end = entry->end; 2725 goto done; 2726 } 2727 } else if (!user_wire || 2728 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2729 entry->wired_count++; 2730 } 2731 /* 2732 * Check the map for holes in the specified region. 2733 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2734 */ 2735 next_entry: 2736 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 2737 entry->end < end && (entry->next == &map->header || 2738 entry->next->start > entry->end)) { 2739 end = entry->end; 2740 rv = KERN_INVALID_ADDRESS; 2741 goto done; 2742 } 2743 entry = entry->next; 2744 } 2745 rv = KERN_SUCCESS; 2746 done: 2747 need_wakeup = FALSE; 2748 if (first_entry == NULL) { 2749 result = vm_map_lookup_entry(map, start, &first_entry); 2750 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2751 first_entry = first_entry->next; 2752 else 2753 KASSERT(result, ("vm_map_wire: lookup failed")); 2754 } 2755 for (entry = first_entry; entry != &map->header && entry->start < end; 2756 entry = entry->next) { 2757 /* 2758 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2759 * space in the unwired region could have been mapped 2760 * while the map lock was dropped for faulting in the 2761 * pages or draining MAP_ENTRY_IN_TRANSITION. 2762 * Moreover, another thread could be simultaneously 2763 * wiring this new mapping entry. Detect these cases 2764 * and skip any entries marked as in transition not by us. 2765 */ 2766 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2767 entry->wiring_thread != curthread) { 2768 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2769 ("vm_map_wire: !HOLESOK and new/changed entry")); 2770 continue; 2771 } 2772 2773 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2774 goto next_entry_done; 2775 2776 if (rv == KERN_SUCCESS) { 2777 if (user_wire) 2778 entry->eflags |= MAP_ENTRY_USER_WIRED; 2779 } else if (entry->wired_count == -1) { 2780 /* 2781 * Wiring failed on this entry. Thus, unwiring is 2782 * unnecessary. 2783 */ 2784 entry->wired_count = 0; 2785 } else if (!user_wire || 2786 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2787 /* 2788 * Undo the wiring. Wiring succeeded on this entry 2789 * but failed on a later entry. 
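 *
 * (Editor's note: entries on which the wiring attempt itself
 * failed were given wired_count == -1 by
 * vm_map_wire_entry_failure() and are handled by the branch
 * above instead.)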
2790 */ 2791 if (entry->wired_count == 1) 2792 vm_map_entry_unwire(map, entry); 2793 else 2794 entry->wired_count--; 2795 } 2796 next_entry_done: 2797 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2798 ("vm_map_wire: in-transition flag missing %p", entry)); 2799 KASSERT(entry->wiring_thread == curthread, 2800 ("vm_map_wire: alien wire %p", entry)); 2801 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2802 MAP_ENTRY_WIRE_SKIPPED); 2803 entry->wiring_thread = NULL; 2804 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2805 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2806 need_wakeup = TRUE; 2807 } 2808 vm_map_simplify_entry(map, entry); 2809 } 2810 vm_map_unlock(map); 2811 if (need_wakeup) 2812 vm_map_wakeup(map); 2813 return (rv); 2814 } 2815 2816 /* 2817 * vm_map_sync 2818 * 2819 * Push any dirty cached pages in the address range to their pager. 2820 * If syncio is TRUE, dirty pages are written synchronously. 2821 * If invalidate is TRUE, any cached pages are freed as well. 2822 * 2823 * If the size of the region from start to end is zero, we are 2824 * supposed to flush all modified pages within the region containing 2825 * start. Unfortunately, a region can be split or coalesced with 2826 * neighboring regions, making it difficult to determine what the 2827 * original region was. Therefore, we approximate this requirement by 2828 * flushing the current region containing start. 2829 * 2830 * Returns an error if any part of the specified range is not mapped. 2831 */ 2832 int 2833 vm_map_sync( 2834 vm_map_t map, 2835 vm_offset_t start, 2836 vm_offset_t end, 2837 boolean_t syncio, 2838 boolean_t invalidate) 2839 { 2840 vm_map_entry_t current; 2841 vm_map_entry_t entry; 2842 vm_size_t size; 2843 vm_object_t object; 2844 vm_ooffset_t offset; 2845 unsigned int last_timestamp; 2846 boolean_t failed; 2847 2848 vm_map_lock_read(map); 2849 VM_MAP_RANGE_CHECK(map, start, end); 2850 if (!vm_map_lookup_entry(map, start, &entry)) { 2851 vm_map_unlock_read(map); 2852 return (KERN_INVALID_ADDRESS); 2853 } else if (start == end) { 2854 start = entry->start; 2855 end = entry->end; 2856 } 2857 /* 2858 * Make a first pass to check for user-wired memory and holes. 2859 */ 2860 for (current = entry; current != &map->header && current->start < end; 2861 current = current->next) { 2862 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2863 vm_map_unlock_read(map); 2864 return (KERN_INVALID_ARGUMENT); 2865 } 2866 if (end > current->end && 2867 (current->next == &map->header || 2868 current->end != current->next->start)) { 2869 vm_map_unlock_read(map); 2870 return (KERN_INVALID_ADDRESS); 2871 } 2872 } 2873 2874 if (invalidate) 2875 pmap_remove(map->pmap, start, end); 2876 failed = FALSE; 2877 2878 /* 2879 * Make a second pass, cleaning/uncaching pages from the indicated 2880 * objects as we go. 2881 */ 2882 for (current = entry; current != &map->header && current->start < end;) { 2883 offset = current->offset + (start - current->start); 2884 size = (end <= current->end ? 
end : current->end) - start; 2885 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2886 vm_map_t smap; 2887 vm_map_entry_t tentry; 2888 vm_size_t tsize; 2889 2890 smap = current->object.sub_map; 2891 vm_map_lock_read(smap); 2892 (void) vm_map_lookup_entry(smap, offset, &tentry); 2893 tsize = tentry->end - offset; 2894 if (tsize < size) 2895 size = tsize; 2896 object = tentry->object.vm_object; 2897 offset = tentry->offset + (offset - tentry->start); 2898 vm_map_unlock_read(smap); 2899 } else { 2900 object = current->object.vm_object; 2901 } 2902 vm_object_reference(object); 2903 last_timestamp = map->timestamp; 2904 vm_map_unlock_read(map); 2905 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2906 failed = TRUE; 2907 start += size; 2908 vm_object_deallocate(object); 2909 vm_map_lock_read(map); 2910 if (last_timestamp == map->timestamp || 2911 !vm_map_lookup_entry(map, start, &current)) 2912 current = current->next; 2913 } 2914 2915 vm_map_unlock_read(map); 2916 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2917 } 2918 2919 /* 2920 * vm_map_entry_unwire: [ internal use only ] 2921 * 2922 * Make the region specified by this entry pageable. 2923 * 2924 * The map in question should be locked. 2925 * [This is the reason for this routine's existence.] 2926 */ 2927 static void 2928 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2929 { 2930 2931 VM_MAP_ASSERT_LOCKED(map); 2932 KASSERT(entry->wired_count > 0, 2933 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 2934 pmap_unwire(map->pmap, entry->start, entry->end); 2935 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 2936 entry->start, PQ_ACTIVE); 2937 entry->wired_count = 0; 2938 } 2939 2940 static void 2941 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2942 { 2943 2944 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2945 vm_object_deallocate(entry->object.vm_object); 2946 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 2947 } 2948 2949 /* 2950 * vm_map_entry_delete: [ internal use only ] 2951 * 2952 * Deallocate the given entry from the target map. 
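 *
 * (Editor's note: on user maps the entry is not freed here;
 * it is queued on curthread->td_map_def_user and released by
 * vm_map_process_deferred() once the map lock has been dropped.
 * See the end of this function.)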
2953 */ 2954 static void 2955 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2956 { 2957 vm_object_t object; 2958 vm_pindex_t offidxstart, offidxend, count, size1; 2959 vm_size_t size; 2960 2961 vm_map_entry_unlink(map, entry); 2962 object = entry->object.vm_object; 2963 2964 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 2965 MPASS(entry->cred == NULL); 2966 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 2967 MPASS(object == NULL); 2968 vm_map_entry_deallocate(entry, map->system_map); 2969 return; 2970 } 2971 2972 size = entry->end - entry->start; 2973 map->size -= size; 2974 2975 if (entry->cred != NULL) { 2976 swap_release_by_cred(size, entry->cred); 2977 crfree(entry->cred); 2978 } 2979 2980 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2981 (object != NULL)) { 2982 KASSERT(entry->cred == NULL || object->cred == NULL || 2983 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 2984 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 2985 count = atop(size); 2986 offidxstart = OFF_TO_IDX(entry->offset); 2987 offidxend = offidxstart + count; 2988 VM_OBJECT_WLOCK(object); 2989 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 2990 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2991 object == kernel_object || object == kmem_object)) { 2992 vm_object_collapse(object); 2993 2994 /* 2995 * The option OBJPR_NOTMAPPED can be passed here 2996 * because vm_map_delete() already performed 2997 * pmap_remove() on the only mapping to this range 2998 * of pages. 2999 */ 3000 vm_object_page_remove(object, offidxstart, offidxend, 3001 OBJPR_NOTMAPPED); 3002 if (object->type == OBJT_SWAP) 3003 swap_pager_freespace(object, offidxstart, 3004 count); 3005 if (offidxend >= object->size && 3006 offidxstart < object->size) { 3007 size1 = object->size; 3008 object->size = offidxstart; 3009 if (object->cred != NULL) { 3010 size1 -= object->size; 3011 KASSERT(object->charge >= ptoa(size1), 3012 ("object %p charge < 0", object)); 3013 swap_release_by_cred(ptoa(size1), 3014 object->cred); 3015 object->charge -= ptoa(size1); 3016 } 3017 } 3018 } 3019 VM_OBJECT_WUNLOCK(object); 3020 } else 3021 entry->object.vm_object = NULL; 3022 if (map->system_map) 3023 vm_map_entry_deallocate(entry, TRUE); 3024 else { 3025 entry->next = curthread->td_map_def_user; 3026 curthread->td_map_def_user = entry; 3027 } 3028 } 3029 3030 /* 3031 * vm_map_delete: [ internal use only ] 3032 * 3033 * Deallocates the given address range from the target 3034 * map. 3035 */ 3036 int 3037 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3038 { 3039 vm_map_entry_t entry; 3040 vm_map_entry_t first_entry; 3041 3042 VM_MAP_ASSERT_LOCKED(map); 3043 if (start == end) 3044 return (KERN_SUCCESS); 3045 3046 /* 3047 * Find the start of the region, and clip it 3048 */ 3049 if (!vm_map_lookup_entry(map, start, &first_entry)) 3050 entry = first_entry->next; 3051 else { 3052 entry = first_entry; 3053 vm_map_clip_start(map, entry, start); 3054 } 3055 3056 /* 3057 * Step through all entries in this region 3058 */ 3059 while ((entry != &map->header) && (entry->start < end)) { 3060 vm_map_entry_t next; 3061 3062 /* 3063 * Wait for wiring or unwiring of an entry to complete. 3064 * Also wait for any system wirings to disappear on 3065 * user maps. 
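 *
 * (Editor's note: the wait below drops the map lock, so the
 * timestamp comparison that follows re-validates "entry", just
 * as vm_map_wire() and vm_map_unwire() do after sleeping.)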
3066 */ 3067 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3068 (vm_map_pmap(map) != kernel_pmap && 3069 vm_map_entry_system_wired_count(entry) != 0)) { 3070 unsigned int last_timestamp; 3071 vm_offset_t saved_start; 3072 vm_map_entry_t tmp_entry; 3073 3074 saved_start = entry->start; 3075 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3076 last_timestamp = map->timestamp; 3077 (void) vm_map_unlock_and_wait(map, 0); 3078 vm_map_lock(map); 3079 if (last_timestamp + 1 != map->timestamp) { 3080 /* 3081 * Look again for the entry because the map was 3082 * modified while it was unlocked. 3083 * Specifically, the entry may have been 3084 * clipped, merged, or deleted. 3085 */ 3086 if (!vm_map_lookup_entry(map, saved_start, 3087 &tmp_entry)) 3088 entry = tmp_entry->next; 3089 else { 3090 entry = tmp_entry; 3091 vm_map_clip_start(map, entry, 3092 saved_start); 3093 } 3094 } 3095 continue; 3096 } 3097 vm_map_clip_end(map, entry, end); 3098 3099 next = entry->next; 3100 3101 /* 3102 * Unwire before removing addresses from the pmap; otherwise, 3103 * unwiring will put the entries back in the pmap. 3104 */ 3105 if (entry->wired_count != 0) { 3106 vm_map_entry_unwire(map, entry); 3107 } 3108 3109 pmap_remove(map->pmap, entry->start, entry->end); 3110 3111 /* 3112 * Delete the entry only after removing all pmap 3113 * entries pointing to its pages. (Otherwise, its 3114 * page frames may be reallocated, and any modify bits 3115 * will be set in the wrong object!) 3116 */ 3117 vm_map_entry_delete(map, entry); 3118 entry = next; 3119 } 3120 return (KERN_SUCCESS); 3121 } 3122 3123 /* 3124 * vm_map_remove: 3125 * 3126 * Remove the given address range from the target map. 3127 * This is the exported form of vm_map_delete. 3128 */ 3129 int 3130 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3131 { 3132 int result; 3133 3134 vm_map_lock(map); 3135 VM_MAP_RANGE_CHECK(map, start, end); 3136 result = vm_map_delete(map, start, end); 3137 vm_map_unlock(map); 3138 return (result); 3139 } 3140 3141 /* 3142 * vm_map_check_protection: 3143 * 3144 * Assert that the target map allows the specified privilege on the 3145 * entire address region given. The entire region must be allocated. 3146 * 3147 * WARNING! This code does not and should not check whether the 3148 * contents of the region are accessible. For example, a smaller file 3149 * might be mapped into a larger address space. 3150 * 3151 * NOTE! This code is also called by munmap(). 3152 * 3153 * The map must be locked. A read lock is sufficient. 3154 */ 3155 boolean_t 3156 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3157 vm_prot_t protection) 3158 { 3159 vm_map_entry_t entry; 3160 vm_map_entry_t tmp_entry; 3161 3162 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3163 return (FALSE); 3164 entry = tmp_entry; 3165 3166 while (start < end) { 3167 if (entry == &map->header) 3168 return (FALSE); 3169 /* 3170 * No holes allowed! 3171 */ 3172 if (start < entry->start) 3173 return (FALSE); 3174 /* 3175 * Check protection associated with entry. 3176 */ 3177 if ((entry->protection & protection) != protection) 3178 return (FALSE); 3179 /* go to next entry */ 3180 start = entry->end; 3181 entry = entry->next; 3182 } 3183 return (TRUE); 3184 } 3185 3186 /* 3187 * vm_map_copy_entry: 3188 * 3189 * Copies the contents of the source entry to the destination 3190 * entry. The entries *must* be aligned properly. 
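 *
 * Sketch of the common fork path (editor's illustration): for an
 * unwired, writable source entry, both entries end up referencing
 * the same object with MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY set
 * and the source pmap write-protected, so the first write on
 * either side faults in a private copy.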
3191 */ 3192 static void 3193 vm_map_copy_entry( 3194 vm_map_t src_map, 3195 vm_map_t dst_map, 3196 vm_map_entry_t src_entry, 3197 vm_map_entry_t dst_entry, 3198 vm_ooffset_t *fork_charge) 3199 { 3200 vm_object_t src_object; 3201 vm_map_entry_t fake_entry; 3202 vm_offset_t size; 3203 struct ucred *cred; 3204 int charged; 3205 3206 VM_MAP_ASSERT_LOCKED(dst_map); 3207 3208 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3209 return; 3210 3211 if (src_entry->wired_count == 0 || 3212 (src_entry->protection & VM_PROT_WRITE) == 0) { 3213 /* 3214 * If the source entry is marked needs_copy, it is already 3215 * write-protected. 3216 */ 3217 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3218 (src_entry->protection & VM_PROT_WRITE) != 0) { 3219 pmap_protect(src_map->pmap, 3220 src_entry->start, 3221 src_entry->end, 3222 src_entry->protection & ~VM_PROT_WRITE); 3223 } 3224 3225 /* 3226 * Make a copy of the object. 3227 */ 3228 size = src_entry->end - src_entry->start; 3229 if ((src_object = src_entry->object.vm_object) != NULL) { 3230 VM_OBJECT_WLOCK(src_object); 3231 charged = ENTRY_CHARGED(src_entry); 3232 if (src_object->handle == NULL && 3233 (src_object->type == OBJT_DEFAULT || 3234 src_object->type == OBJT_SWAP)) { 3235 vm_object_collapse(src_object); 3236 if ((src_object->flags & (OBJ_NOSPLIT | 3237 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3238 vm_object_split(src_entry); 3239 src_object = 3240 src_entry->object.vm_object; 3241 } 3242 } 3243 vm_object_reference_locked(src_object); 3244 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3245 if (src_entry->cred != NULL && 3246 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3247 KASSERT(src_object->cred == NULL, 3248 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3249 src_object)); 3250 src_object->cred = src_entry->cred; 3251 src_object->charge = size; 3252 } 3253 VM_OBJECT_WUNLOCK(src_object); 3254 dst_entry->object.vm_object = src_object; 3255 if (charged) { 3256 cred = curthread->td_ucred; 3257 crhold(cred); 3258 dst_entry->cred = cred; 3259 *fork_charge += size; 3260 if (!(src_entry->eflags & 3261 MAP_ENTRY_NEEDS_COPY)) { 3262 crhold(cred); 3263 src_entry->cred = cred; 3264 *fork_charge += size; 3265 } 3266 } 3267 src_entry->eflags |= MAP_ENTRY_COW | 3268 MAP_ENTRY_NEEDS_COPY; 3269 dst_entry->eflags |= MAP_ENTRY_COW | 3270 MAP_ENTRY_NEEDS_COPY; 3271 dst_entry->offset = src_entry->offset; 3272 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3273 /* 3274 * MAP_ENTRY_VN_WRITECNT cannot 3275 * indicate write reference from 3276 * src_entry, since the entry is 3277 * marked as needs copy. Allocate a 3278 * fake entry that is used to 3279 * decrement object->un_pager.vnp.writecount 3280 * at the appropriate time. Attach 3281 * fake_entry to the deferred list. 
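 *
 * (Editor's note: vm_map_process_deferred() recognizes
 * MAP_ENTRY_VN_WRITECNT on the fake entry and drops the vnode's
 * write count for [start, end) at that later, safe point.)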
3282 */ 3283 fake_entry = vm_map_entry_create(dst_map); 3284 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3285 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3286 vm_object_reference(src_object); 3287 fake_entry->object.vm_object = src_object; 3288 fake_entry->start = src_entry->start; 3289 fake_entry->end = src_entry->end; 3290 fake_entry->next = curthread->td_map_def_user; 3291 curthread->td_map_def_user = fake_entry; 3292 } 3293 3294 pmap_copy(dst_map->pmap, src_map->pmap, 3295 dst_entry->start, dst_entry->end - dst_entry->start, 3296 src_entry->start); 3297 } else { 3298 dst_entry->object.vm_object = NULL; 3299 dst_entry->offset = 0; 3300 if (src_entry->cred != NULL) { 3301 dst_entry->cred = curthread->td_ucred; 3302 crhold(dst_entry->cred); 3303 *fork_charge += size; 3304 } 3305 } 3306 } else { 3307 /* 3308 * We don't want to make writeable wired pages copy-on-write. 3309 * Immediately copy these pages into the new map by simulating 3310 * page faults. The new pages are pageable. 3311 */ 3312 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3313 fork_charge); 3314 } 3315 } 3316 3317 /* 3318 * vmspace_map_entry_forked: 3319 * Update the newly-forked vmspace each time a map entry is inherited 3320 * or copied. The values for vm_dsize and vm_tsize are approximate 3321 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3322 */ 3323 static void 3324 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3325 vm_map_entry_t entry) 3326 { 3327 vm_size_t entrysize; 3328 vm_offset_t newend; 3329 3330 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3331 return; 3332 entrysize = entry->end - entry->start; 3333 vm2->vm_map.size += entrysize; 3334 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3335 vm2->vm_ssize += btoc(entrysize); 3336 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3337 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3338 newend = MIN(entry->end, 3339 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3340 vm2->vm_dsize += btoc(newend - entry->start); 3341 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3342 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3343 newend = MIN(entry->end, 3344 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3345 vm2->vm_tsize += btoc(newend - entry->start); 3346 } 3347 } 3348 3349 /* 3350 * vmspace_fork: 3351 * Create a new process vmspace structure and vm_map 3352 * based on those of an existing process. The new map 3353 * is based on the old map, according to the inheritance 3354 * values on the regions in that map. 3355 * 3356 * XXX It might be worth coalescing the entries added to the new vmspace. 3357 * 3358 * The source map must not be locked. 3359 */ 3360 struct vmspace * 3361 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3362 { 3363 struct vmspace *vm2; 3364 vm_map_t new_map, old_map; 3365 vm_map_entry_t new_entry, old_entry; 3366 vm_object_t object; 3367 int locked; 3368 vm_inherit_t inh; 3369 3370 old_map = &vm1->vm_map; 3371 /* Copy immutable fields of vm1 to vm2. 
*/ 3372 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL); 3373 if (vm2 == NULL) 3374 return (NULL); 3375 vm2->vm_taddr = vm1->vm_taddr; 3376 vm2->vm_daddr = vm1->vm_daddr; 3377 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3378 vm_map_lock(old_map); 3379 if (old_map->busy) 3380 vm_map_wait_busy(old_map); 3381 new_map = &vm2->vm_map; 3382 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3383 KASSERT(locked, ("vmspace_fork: lock failed")); 3384 3385 old_entry = old_map->header.next; 3386 3387 while (old_entry != &old_map->header) { 3388 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3389 panic("vm_map_fork: encountered a submap"); 3390 3391 inh = old_entry->inheritance; 3392 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3393 inh != VM_INHERIT_NONE) 3394 inh = VM_INHERIT_COPY; 3395 3396 switch (inh) { 3397 case VM_INHERIT_NONE: 3398 break; 3399 3400 case VM_INHERIT_SHARE: 3401 /* 3402 * Clone the entry, creating the shared object if necessary. 3403 */ 3404 object = old_entry->object.vm_object; 3405 if (object == NULL) { 3406 object = vm_object_allocate(OBJT_DEFAULT, 3407 atop(old_entry->end - old_entry->start)); 3408 old_entry->object.vm_object = object; 3409 old_entry->offset = 0; 3410 if (old_entry->cred != NULL) { 3411 object->cred = old_entry->cred; 3412 object->charge = old_entry->end - 3413 old_entry->start; 3414 old_entry->cred = NULL; 3415 } 3416 } 3417 3418 /* 3419 * Add the reference before calling vm_object_shadow 3420 * to ensure that a shadow object is created. 3421 */ 3422 vm_object_reference(object); 3423 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3424 vm_object_shadow(&old_entry->object.vm_object, 3425 &old_entry->offset, 3426 old_entry->end - old_entry->start); 3427 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3428 /* Transfer the second reference too. */ 3429 vm_object_reference( 3430 old_entry->object.vm_object); 3431 3432 /* 3433 * As in vm_map_simplify_entry(), the 3434 * vnode lock will not be acquired in 3435 * this call to vm_object_deallocate(). 3436 */ 3437 vm_object_deallocate(object); 3438 object = old_entry->object.vm_object; 3439 } 3440 VM_OBJECT_WLOCK(object); 3441 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3442 if (old_entry->cred != NULL) { 3443 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3444 object->cred = old_entry->cred; 3445 object->charge = old_entry->end - old_entry->start; 3446 old_entry->cred = NULL; 3447 } 3448 3449 /* 3450 * Assert the correct state of the vnode 3451 * v_writecount while the object is locked, to 3452 * avoid relocking it later just for this 3453 * assertion. 3454 */ 3455 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3456 object->type == OBJT_VNODE) { 3457 KASSERT(((struct vnode *)object->handle)-> 3458 v_writecount > 0, 3459 ("vmspace_fork: v_writecount %p", object)); 3460 KASSERT(object->un_pager.vnp.writemappings > 0, 3461 ("vmspace_fork: vnp.writecount %p", 3462 object)); 3463 } 3464 VM_OBJECT_WUNLOCK(object); 3465 3466 /* 3467 * Clone the entry, referencing the shared object. 3468 */ 3469 new_entry = vm_map_entry_create(new_map); 3470 *new_entry = *old_entry; 3471 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3472 MAP_ENTRY_IN_TRANSITION); 3473 new_entry->wiring_thread = NULL; 3474 new_entry->wired_count = 0; 3475 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3476 vnode_pager_update_writecount(object, 3477 new_entry->start, new_entry->end); 3478 } 3479 3480 /* 3481 * Insert the entry into the new map -- we know we're 3482 * inserting at the end of the new map. 
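 *
 * (Editor's note: linking after new_map->header.prev appends
 * the entry, which preserves address order because old_map is
 * walked from its lowest entry upward.)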
3483 */ 3484 vm_map_entry_link(new_map, new_map->header.prev, 3485 new_entry); 3486 vmspace_map_entry_forked(vm1, vm2, new_entry); 3487 3488 /* 3489 * Update the physical map 3490 */ 3491 pmap_copy(new_map->pmap, old_map->pmap, 3492 new_entry->start, 3493 (old_entry->end - old_entry->start), 3494 old_entry->start); 3495 break; 3496 3497 case VM_INHERIT_COPY: 3498 /* 3499 * Clone the entry and link into the map. 3500 */ 3501 new_entry = vm_map_entry_create(new_map); 3502 *new_entry = *old_entry; 3503 /* 3504 * Copied entry is COW over the old object. 3505 */ 3506 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3507 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3508 new_entry->wiring_thread = NULL; 3509 new_entry->wired_count = 0; 3510 new_entry->object.vm_object = NULL; 3511 new_entry->cred = NULL; 3512 vm_map_entry_link(new_map, new_map->header.prev, 3513 new_entry); 3514 vmspace_map_entry_forked(vm1, vm2, new_entry); 3515 vm_map_copy_entry(old_map, new_map, old_entry, 3516 new_entry, fork_charge); 3517 break; 3518 3519 case VM_INHERIT_ZERO: 3520 /* 3521 * Create a new anonymous mapping entry modelled from 3522 * the old one. 3523 */ 3524 new_entry = vm_map_entry_create(new_map); 3525 memset(new_entry, 0, sizeof(*new_entry)); 3526 3527 new_entry->start = old_entry->start; 3528 new_entry->end = old_entry->end; 3529 new_entry->eflags = old_entry->eflags & 3530 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3531 MAP_ENTRY_VN_WRITECNT); 3532 new_entry->protection = old_entry->protection; 3533 new_entry->max_protection = old_entry->max_protection; 3534 new_entry->inheritance = VM_INHERIT_ZERO; 3535 3536 vm_map_entry_link(new_map, new_map->header.prev, 3537 new_entry); 3538 vmspace_map_entry_forked(vm1, vm2, new_entry); 3539 3540 new_entry->cred = curthread->td_ucred; 3541 crhold(new_entry->cred); 3542 *fork_charge += (new_entry->end - new_entry->start); 3543 3544 break; 3545 } 3546 old_entry = old_entry->next; 3547 } 3548 /* 3549 * Use inlined vm_map_unlock() to postpone handling the deferred 3550 * map entries, which cannot be done until both old_map and 3551 * new_map locks are released. 3552 */ 3553 sx_xunlock(&old_map->lock); 3554 sx_xunlock(&new_map->lock); 3555 vm_map_process_deferred(); 3556 3557 return (vm2); 3558 } 3559 3560 /* 3561 * Create a process's stack for exec_new_vmspace(). This function is never 3562 * asked to wire the newly created stack. 3563 */ 3564 int 3565 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3566 vm_prot_t prot, vm_prot_t max, int cow) 3567 { 3568 vm_size_t growsize, init_ssize; 3569 rlim_t vmemlim; 3570 int rv; 3571 3572 MPASS((map->flags & MAP_WIREFUTURE) == 0); 3573 growsize = sgrowsiz; 3574 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 3575 vm_map_lock(map); 3576 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3577 /* If we would blow our VMEM resource limit, no go */ 3578 if (map->size + init_ssize > vmemlim) { 3579 rv = KERN_NO_SPACE; 3580 goto out; 3581 } 3582 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 3583 max, cow); 3584 out: 3585 vm_map_unlock(map); 3586 return (rv); 3587 } 3588 3589 static int stack_guard_page = 1; 3590 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 3591 &stack_guard_page, 0, 3592 "Specifies the number of guard pages for a stack that grows"); 3593 3594 static int 3595 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3596 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 3597 { 3598 vm_map_entry_t new_entry, prev_entry; 3599 vm_offset_t bot, gap_bot, gap_top, top; 3600 vm_size_t init_ssize, sgp; 3601 int orient, rv; 3602 3603 /* 3604 * The stack orientation is piggybacked with the cow argument. 3605 * Extract it into orient and mask the cow argument so that we 3606 * don't pass it around further. 3607 */ 3608 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 3609 KASSERT(orient != 0, ("No stack grow direction")); 3610 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 3611 ("bi-dir stack")); 3612 3613 sgp = (vm_size_t)stack_guard_page * PAGE_SIZE; 3614 if (addrbos < vm_map_min(map) || 3615 addrbos > vm_map_max(map) || 3616 addrbos + max_ssize < addrbos || 3617 sgp >= max_ssize) 3618 return (KERN_NO_SPACE); 3619 3620 init_ssize = growsize; 3621 if (max_ssize < init_ssize + sgp) 3622 init_ssize = max_ssize - sgp; 3623 3624 /* If addr is already mapped, no go */ 3625 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 3626 return (KERN_NO_SPACE); 3627 3628 /* 3629 * If we can't accommodate max_ssize in the current mapping, no go. 3630 */ 3631 if ((prev_entry->next != &map->header) && 3632 (prev_entry->next->start < addrbos + max_ssize)) 3633 return (KERN_NO_SPACE); 3634 3635 /* 3636 * We initially map a stack of only init_ssize. We will grow as 3637 * needed later. Depending on the orientation of the stack (i.e. 3638 * the grow direction) we either map at the top of the range, the 3639 * bottom of the range or in the middle. 3640 * 3641 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3642 * and cow to be 0. Possibly we should eliminate these as input 3643 * parameters, and just pass these values here in the insert call. 3644 */ 3645 if (orient == MAP_STACK_GROWS_DOWN) { 3646 bot = addrbos + max_ssize - init_ssize; 3647 top = bot + init_ssize; 3648 gap_bot = addrbos; 3649 gap_top = bot; 3650 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 3651 bot = addrbos; 3652 top = bot + init_ssize; 3653 gap_bot = top; 3654 gap_top = addrbos + max_ssize; 3655 } 3656 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3657 if (rv != KERN_SUCCESS) 3658 return (rv); 3659 new_entry = prev_entry->next; 3660 KASSERT(new_entry->end == top || new_entry->start == bot, 3661 ("Bad entry start/end for new stack entry")); 3662 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3663 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3664 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3665 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3666 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3667 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3668 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 3669 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 
3670 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 3671 if (rv != KERN_SUCCESS) 3672 (void)vm_map_delete(map, bot, top); 3673 return (rv); 3674 } 3675 3676 /* 3677 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 3678 * successfully grow the stack. 3679 */ 3680 static int 3681 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 3682 { 3683 vm_map_entry_t stack_entry; 3684 struct proc *p; 3685 struct vmspace *vm; 3686 struct ucred *cred; 3687 vm_offset_t gap_end, gap_start, grow_start; 3688 size_t grow_amount, guard, max_grow; 3689 rlim_t lmemlim, stacklim, vmemlim; 3690 int rv, rv1; 3691 bool gap_deleted, grow_down, is_procstack; 3692 #ifdef notyet 3693 uint64_t limit; 3694 #endif 3695 #ifdef RACCT 3696 int error; 3697 #endif 3698 3699 p = curproc; 3700 vm = p->p_vmspace; 3701 MPASS(map == &p->p_vmspace->vm_map); 3702 MPASS(!map->system_map); 3703 3704 guard = stack_guard_page * PAGE_SIZE; 3705 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 3706 stacklim = lim_cur(curthread, RLIMIT_STACK); 3707 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3708 retry: 3709 /* If addr is not in a hole for a stack grow area, no need to grow. */ 3710 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 3711 return (KERN_FAILURE); 3712 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 3713 return (KERN_SUCCESS); 3714 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 3715 stack_entry = gap_entry->next; 3716 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 3717 stack_entry->start != gap_entry->end) 3718 return (KERN_FAILURE); 3719 grow_amount = round_page(stack_entry->start - addr); 3720 grow_down = true; 3721 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 3722 stack_entry = gap_entry->prev; 3723 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 3724 stack_entry->end != gap_entry->start) 3725 return (KERN_FAILURE); 3726 grow_amount = round_page(addr + 1 - stack_entry->end); 3727 grow_down = false; 3728 } else { 3729 return (KERN_FAILURE); 3730 } 3731 max_grow = gap_entry->end - gap_entry->start; 3732 if (guard > max_grow) 3733 return (KERN_NO_SPACE); 3734 max_grow -= guard; 3735 if (grow_amount > max_grow) 3736 return (KERN_NO_SPACE); 3737 3738 /* 3739 * If this is the main process stack, see if we're over the stack 3740 * limit. 
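 *
 * Worked example (editor's illustration): a grow-down fault one
 * page below stack_entry->start yields grow_amount == PAGE_SIZE
 * above; it is later rounded up to sgrowsiz, clamped to the gap
 * size less the guard pages, and checked against RLIMIT_STACK.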
3741 */ 3742 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 3743 addr < (vm_offset_t)p->p_sysent->sv_usrstack; 3744 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 3745 return (KERN_NO_SPACE); 3746 3747 #ifdef RACCT 3748 if (racct_enable) { 3749 PROC_LOCK(p); 3750 if (is_procstack && racct_set(p, RACCT_STACK, 3751 ctob(vm->vm_ssize) + grow_amount)) { 3752 PROC_UNLOCK(p); 3753 return (KERN_NO_SPACE); 3754 } 3755 PROC_UNLOCK(p); 3756 } 3757 #endif 3758 3759 grow_amount = roundup(grow_amount, sgrowsiz); 3760 if (grow_amount > max_grow) 3761 grow_amount = max_grow; 3762 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3763 grow_amount = trunc_page((vm_size_t)stacklim) - 3764 ctob(vm->vm_ssize); 3765 } 3766 3767 #ifdef notyet 3768 PROC_LOCK(p); 3769 limit = racct_get_available(p, RACCT_STACK); 3770 PROC_UNLOCK(p); 3771 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 3772 grow_amount = limit - ctob(vm->vm_ssize); 3773 #endif 3774 3775 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 3776 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 3777 rv = KERN_NO_SPACE; 3778 goto out; 3779 } 3780 #ifdef RACCT 3781 if (racct_enable) { 3782 PROC_LOCK(p); 3783 if (racct_set(p, RACCT_MEMLOCK, 3784 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 3785 PROC_UNLOCK(p); 3786 rv = KERN_NO_SPACE; 3787 goto out; 3788 } 3789 PROC_UNLOCK(p); 3790 } 3791 #endif 3792 } 3793 3794 /* If we would blow our VMEM resource limit, no go */ 3795 if (map->size + grow_amount > vmemlim) { 3796 rv = KERN_NO_SPACE; 3797 goto out; 3798 } 3799 #ifdef RACCT 3800 if (racct_enable) { 3801 PROC_LOCK(p); 3802 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 3803 PROC_UNLOCK(p); 3804 rv = KERN_NO_SPACE; 3805 goto out; 3806 } 3807 PROC_UNLOCK(p); 3808 } 3809 #endif 3810 3811 if (vm_map_lock_upgrade(map)) { 3812 gap_entry = NULL; 3813 vm_map_lock_read(map); 3814 goto retry; 3815 } 3816 3817 if (grow_down) { 3818 grow_start = gap_entry->end - grow_amount; 3819 if (gap_entry->start + grow_amount == gap_entry->end) { 3820 gap_start = gap_entry->start; 3821 gap_end = gap_entry->end; 3822 vm_map_entry_delete(map, gap_entry); 3823 gap_deleted = true; 3824 } else { 3825 MPASS(gap_entry->start < gap_entry->end - grow_amount); 3826 gap_entry->end -= grow_amount; 3827 vm_map_entry_resize_free(map, gap_entry); 3828 gap_deleted = false; 3829 } 3830 rv = vm_map_insert(map, NULL, 0, grow_start, 3831 grow_start + grow_amount, 3832 stack_entry->protection, stack_entry->max_protection, 3833 MAP_STACK_GROWS_DOWN); 3834 if (rv != KERN_SUCCESS) { 3835 if (gap_deleted) { 3836 rv1 = vm_map_insert(map, NULL, 0, gap_start, 3837 gap_end, VM_PROT_NONE, VM_PROT_NONE, 3838 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN); 3839 MPASS(rv1 == KERN_SUCCESS); 3840 } else { 3841 gap_entry->end += grow_amount; 3842 vm_map_entry_resize_free(map, gap_entry); 3843 } 3844 } 3845 } else { 3846 grow_start = stack_entry->end; 3847 cred = stack_entry->cred; 3848 if (cred == NULL && stack_entry->object.vm_object != NULL) 3849 cred = stack_entry->object.vm_object->cred; 3850 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 3851 rv = KERN_NO_SPACE; 3852 /* Grow the underlying object if applicable. 
		 */
3853		else if (stack_entry->object.vm_object == NULL ||
3854		    vm_object_coalesce(stack_entry->object.vm_object,
3855		    stack_entry->offset,
3856		    (vm_size_t)(stack_entry->end - stack_entry->start),
3857		    (vm_size_t)grow_amount, cred != NULL)) {
3858			if (gap_entry->start + grow_amount == gap_entry->end)
3859				vm_map_entry_delete(map, gap_entry);
3860			else
3861				gap_entry->start += grow_amount;
3862			stack_entry->end += grow_amount;
3863			map->size += grow_amount;
3864			vm_map_entry_resize_free(map, stack_entry);
3865			rv = KERN_SUCCESS;
3866		} else
3867			rv = KERN_FAILURE;
3868	}
3869	if (rv == KERN_SUCCESS && is_procstack)
3870		vm->vm_ssize += btoc(grow_amount);
3871
3872	/*
3873	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3874	 */
3875	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
3876		vm_map_unlock(map);
3877		vm_map_wire(map, grow_start, grow_start + grow_amount,
3878		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
3879		vm_map_lock_read(map);
3880	} else
3881		vm_map_lock_downgrade(map);
3882
3883 out:
3884 #ifdef RACCT
3885	if (racct_enable && rv != KERN_SUCCESS) {
3886		PROC_LOCK(p);
3887		error = racct_set(p, RACCT_VMEM, map->size);
3888		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3889		if (!old_mlock) {
3890			error = racct_set(p, RACCT_MEMLOCK,
3891			    ptoa(pmap_wired_count(map->pmap)));
3892			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3893		}
3894		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3895		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3896		PROC_UNLOCK(p);
3897	}
3898 #endif
3899
3900	return (rv);
3901 }
3902
3903 /*
3904  * Unshare the specified VM space for exec.  If other processes are
3905  * mapped to it, then create a new one.  The new vmspace is empty.
3906  */
3907 int
3908 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3909 {
3910	struct vmspace *oldvmspace = p->p_vmspace;
3911	struct vmspace *newvmspace;
3912
3913	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3914	    ("vmspace_exec recursed"));
3915	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3916	if (newvmspace == NULL)
3917		return (ENOMEM);
3918	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3919	/*
3920	 * This code is written like this for prototype purposes.  The
3921	 * goal is to avoid running down the vmspace here, but to let the
3922	 * other processes that are still using the vmspace run it
3923	 * down eventually.  Even though there is little or no chance of
3924	 * blocking here, it is a good idea to keep this form for future mods.
3925	 */
3926	PROC_VMSPACE_LOCK(p);
3927	p->p_vmspace = newvmspace;
3928	PROC_VMSPACE_UNLOCK(p);
3929	if (p == curthread->td_proc)
3930		pmap_activate(curthread);
3931	curthread->td_pflags |= TDP_EXECVMSPC;
3932	return (0);
3933 }
3934
3935 /*
3936  * Unshare the specified VM space for forcing COW.  This
3937  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
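 * For example, a process that earlier shared its address space via
 * rfork(RFMEM | ...) and later calls rfork() with both RFPROC and RFMEM
 * clear ends up here: it receives a private, copy-on-write fork of the
 * vmspace while any remaining sharers keep using the old one.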
3938  */
3939 int
3940 vmspace_unshare(struct proc *p)
3941 {
3942	struct vmspace *oldvmspace = p->p_vmspace;
3943	struct vmspace *newvmspace;
3944	vm_ooffset_t fork_charge;
3945
3946	if (oldvmspace->vm_refcnt == 1)
3947		return (0);
3948	fork_charge = 0;
3949	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3950	if (newvmspace == NULL)
3951		return (ENOMEM);
3952	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3953		vmspace_free(newvmspace);
3954		return (ENOMEM);
3955	}
3956	PROC_VMSPACE_LOCK(p);
3957	p->p_vmspace = newvmspace;
3958	PROC_VMSPACE_UNLOCK(p);
3959	if (p == curthread->td_proc)
3960		pmap_activate(curthread);
3961	vmspace_free(oldvmspace);
3962	return (0);
3963 }
3964
3965 /*
3966  * vm_map_lookup:
3967  *
3968  *	Finds the VM object, offset, and
3969  *	protection for a given virtual address in the
3970  *	specified map, assuming a page fault of the
3971  *	type specified.
3972  *
3973  *	Leaves the map in question locked for read; return
3974  *	values are guaranteed until a vm_map_lookup_done
3975  *	call is performed.  Note that the map argument
3976  *	is in/out; the returned map must be used in
3977  *	the call to vm_map_lookup_done.
3978  *
3979  *	A handle (out_entry) is returned for use in
3980  *	vm_map_lookup_done, to make that fast.
3981  *
3982  *	If a lookup is requested with "write protection"
3983  *	specified, the map may be changed to perform virtual
3984  *	copying operations, although the data referenced will
3985  *	remain the same.
3986  */
3987 int
3988 vm_map_lookup(vm_map_t *var_map,	/* IN/OUT */
3989     vm_offset_t vaddr,
3990     vm_prot_t fault_typea,
3991     vm_map_entry_t *out_entry,		/* OUT */
3992     vm_object_t *object,		/* OUT */
3993     vm_pindex_t *pindex,		/* OUT */
3994     vm_prot_t *out_prot,		/* OUT */
3995     boolean_t *wired)			/* OUT */
3996 {
3997	vm_map_entry_t entry;
3998	vm_map_t map = *var_map;
3999	vm_prot_t prot;
4000	vm_prot_t fault_type = fault_typea;
4001	vm_object_t eobject;
4002	vm_size_t size;
4003	struct ucred *cred;
4004
4005 RetryLookup:
4006
4007	vm_map_lock_read(map);
4008
4009 RetryLookupLocked:
4010	/*
4011	 * Look up the faulting address.
4012	 */
4013	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4014		vm_map_unlock_read(map);
4015		return (KERN_INVALID_ADDRESS);
4016	}
4017
4018	entry = *out_entry;
4019
4020	/*
4021	 * Handle submaps.
4022	 */
4023	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4024		vm_map_t old_map = map;
4025
4026		*var_map = map = entry->object.sub_map;
4027		vm_map_unlock_read(old_map);
4028		goto RetryLookup;
4029	}
4030
4031	/*
4032	 * Check whether this task is allowed to have this page.
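	 * A fault that lands in a stack gap gets one chance to succeed:
	 * when the caller passed VM_PROT_FAULT_LOOKUP and the guard entry
	 * is a stack gap, vm_map_growstack() is invoked and, on success,
	 * the lookup is retried against the now-grown stack entry.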
4033	 */
4034	prot = entry->protection;
4035	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4036		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4037		if (prot == VM_PROT_NONE && map != kernel_map &&
4038		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4039		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4040		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4041		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4042			goto RetryLookupLocked;
4043	}
4044	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4045	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4046		vm_map_unlock_read(map);
4047		return (KERN_PROTECTION_FAILURE);
4048	}
4049	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4050	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4051	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4052	    ("entry %p flags %x", entry, entry->eflags));
4053	if ((fault_typea & VM_PROT_COPY) != 0 &&
4054	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4055	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4056		vm_map_unlock_read(map);
4057		return (KERN_PROTECTION_FAILURE);
4058	}
4059
4060	/*
4061	 * If this page is not pageable, we have to get it for all possible
4062	 * accesses.
4063	 */
4064	*wired = (entry->wired_count != 0);
4065	if (*wired)
4066		fault_type = entry->protection;
4067	size = entry->end - entry->start;
4068	/*
4069	 * If the entry was copy-on-write, we either copy now or deny writes.
4070	 */
4071	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4072		/*
4073		 * If we want to write the page, we may as well handle that
4074		 * now since we've got the map locked.
4075		 *
4076		 * If we don't need to write the page, we just demote the
4077		 * permissions allowed.
4078		 */
4079		if ((fault_type & VM_PROT_WRITE) != 0 ||
4080		    (fault_typea & VM_PROT_COPY) != 0) {
4081			/*
4082			 * Make a new object, and place it in the object
4083			 * chain.  Note that no new references have appeared
4084			 * -- one just moved from the map to the new
4085			 * object.
4086			 */
4087			if (vm_map_lock_upgrade(map))
4088				goto RetryLookup;
4089
4090			if (entry->cred == NULL) {
4091				/*
4092				 * The debugger owner is charged for
4093				 * the memory.
4094				 */
4095				cred = curthread->td_ucred;
4096				crhold(cred);
4097				if (!swap_reserve_by_cred(size, cred)) {
4098					crfree(cred);
4099					vm_map_unlock(map);
4100					return (KERN_RESOURCE_SHORTAGE);
4101				}
4102				entry->cred = cred;
4103			}
4104			vm_object_shadow(&entry->object.vm_object,
4105			    &entry->offset, size);
4106			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4107			eobject = entry->object.vm_object;
4108			if (eobject->cred != NULL) {
4109				/*
4110				 * The object was not shadowed; drop the duplicate charge.
4111				 */
4112				swap_release_by_cred(size, entry->cred);
4113				crfree(entry->cred);
4114				entry->cred = NULL;
4115			} else if (entry->cred != NULL) {
4116				VM_OBJECT_WLOCK(eobject);
4117				eobject->cred = entry->cred;
4118				eobject->charge = size;
4119				VM_OBJECT_WUNLOCK(eobject);
4120				entry->cred = NULL;
4121			}
4122
4123			vm_map_lock_downgrade(map);
4124		} else {
4125			/*
4126			 * We're attempting to read a copy-on-write page --
4127			 * don't allow writes.
4128			 */
4129			prot &= ~VM_PROT_WRITE;
4130		}
4131	}
4132
4133	/*
4134	 * Create an object if necessary.
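	 * Anonymous entries may be created without a backing object; the
	 * first lookup that needs one allocates an OBJT_DEFAULT object
	 * covering the entry and moves any pending cred charge onto it.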
4135	 */
4136	if (entry->object.vm_object == NULL &&
4137	    !map->system_map) {
4138		if (vm_map_lock_upgrade(map))
4139			goto RetryLookup;
4140		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4141		    atop(size));
4142		entry->offset = 0;
4143		if (entry->cred != NULL) {
4144			VM_OBJECT_WLOCK(entry->object.vm_object);
4145			entry->object.vm_object->cred = entry->cred;
4146			entry->object.vm_object->charge = size;
4147			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4148			entry->cred = NULL;
4149		}
4150		vm_map_lock_downgrade(map);
4151	}
4152
4153	/*
4154	 * Return the object/offset from this entry.  If the entry was
4155	 * copy-on-write or empty, it has been fixed up.
4156	 */
4157	*pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
4158	*object = entry->object.vm_object;
4159
4160	*out_prot = prot;
4161	return (KERN_SUCCESS);
4162 }
4163
4164 /*
4165  * vm_map_lookup_locked:
4166  *
4167  *	Look up the faulting address.  A version of vm_map_lookup that returns
4168  *	KERN_FAILURE instead of blocking on map lock or memory allocation.
4169  */
4170 int
4171 vm_map_lookup_locked(vm_map_t *var_map,	/* IN/OUT */
4172     vm_offset_t vaddr,
4173     vm_prot_t fault_typea,
4174     vm_map_entry_t *out_entry,		/* OUT */
4175     vm_object_t *object,		/* OUT */
4176     vm_pindex_t *pindex,		/* OUT */
4177     vm_prot_t *out_prot,		/* OUT */
4178     boolean_t *wired)			/* OUT */
4179 {
4180	vm_map_entry_t entry;
4181	vm_map_t map = *var_map;
4182	vm_prot_t prot;
4183	vm_prot_t fault_type = fault_typea;
4184
4185	/*
4186	 * Look up the faulting address.
4187	 */
4188	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4189		return (KERN_INVALID_ADDRESS);
4190
4191	entry = *out_entry;
4192
4193	/*
4194	 * Fail if the entry refers to a submap.
4195	 */
4196	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4197		return (KERN_FAILURE);
4198
4199	/*
4200	 * Check whether this task is allowed to have this page.
4201	 */
4202	prot = entry->protection;
4203	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4204	if ((fault_type & prot) != fault_type)
4205		return (KERN_PROTECTION_FAILURE);
4206
4207	/*
4208	 * If this page is not pageable, we have to get it for all possible
4209	 * accesses.
4210	 */
4211	*wired = (entry->wired_count != 0);
4212	if (*wired)
4213		fault_type = entry->protection;
4214
4215	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4216		/*
4217		 * Fail if the entry was copy-on-write for a write fault.
4218		 */
4219		if (fault_type & VM_PROT_WRITE)
4220			return (KERN_FAILURE);
4221		/*
4222		 * We're attempting to read a copy-on-write page --
4223		 * don't allow writes.
4224		 */
4225		prot &= ~VM_PROT_WRITE;
4226	}
4227
4228	/*
4229	 * Fail if an object should be created.
4230	 */
4231	if (entry->object.vm_object == NULL && !map->system_map)
4232		return (KERN_FAILURE);
4233
4234	/*
4235	 * Return the object/offset from this entry.  If the entry was
4236	 * copy-on-write or empty, it has been fixed up.
4237	 */
4238	*pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
4239	*object = entry->object.vm_object;
4240
4241	*out_prot = prot;
4242	return (KERN_SUCCESS);
4243 }
4244
4245 /*
4246  * vm_map_lookup_done:
4247  *
4248  *	Releases locks acquired by a vm_map_lookup
4249  *	(according to the handle returned by that lookup).
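 *
 *	A typical pairing looks like the following sketch (variable names
 *	are illustrative and error handling is elided; vm_fault() is the
 *	canonical caller):
 *
 *		vm_map_t fmap = map;
 *		vm_map_entry_t fentry;
 *		vm_object_t fobject;
 *		vm_pindex_t fpindex;
 *		vm_prot_t fprot;
 *		boolean_t fwired;
 *
 *		if (vm_map_lookup(&fmap, vaddr, VM_PROT_READ, &fentry,
 *		    &fobject, &fpindex, &fprot, &fwired) != KERN_SUCCESS)
 *			return (KERN_INVALID_ADDRESS);
 *		... use fobject and fpindex while fmap stays read-locked ...
 *		vm_map_lookup_done(fmap, fentry);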
4250  */
4251 void
4252 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4253 {
4254	/*
4255	 * Unlock the main-level map.
4256	 */
4257	vm_map_unlock_read(map);
4258 }
4259
4260 #include "opt_ddb.h"
4261 #ifdef DDB
4262 #include <sys/kernel.h>
4263
4264 #include <ddb/ddb.h>
4265
4266 static void
4267 vm_map_print(vm_map_t map)
4268 {
4269	vm_map_entry_t entry;
4270
4271	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4272	    (void *)map,
4273	    (void *)map->pmap, map->nentries, map->timestamp);
4274
4275	db_indent += 2;
4276	for (entry = map->header.next; entry != &map->header;
4277	    entry = entry->next) {
4278		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
4279		    (void *)entry, (void *)entry->start, (void *)entry->end,
4280		    entry->eflags);
4281		{
4282			static char *inheritance_name[4] =
4283			    {"share", "copy", "none", "donate_copy"};
4284
4285			db_iprintf(" prot=%x/%x/%s",
4286			    entry->protection,
4287			    entry->max_protection,
4288			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4289			if (entry->wired_count != 0)
4290				db_printf(", wired");
4291		}
4292		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4293			db_printf(", share=%p, offset=0x%jx\n",
4294			    (void *)entry->object.sub_map,
4295			    (uintmax_t)entry->offset);
4296			if ((entry->prev == &map->header) ||
4297			    (entry->prev->object.sub_map !=
4298			    entry->object.sub_map)) {
4299				db_indent += 2;
4300				vm_map_print((vm_map_t)entry->object.sub_map);
4301				db_indent -= 2;
4302			}
4303		} else {
4304			if (entry->cred != NULL)
4305				db_printf(", ruid %d", entry->cred->cr_ruid);
4306			db_printf(", object=%p, offset=0x%jx",
4307			    (void *)entry->object.vm_object,
4308			    (uintmax_t)entry->offset);
4309			if (entry->object.vm_object && entry->object.vm_object->cred)
4310				db_printf(", obj ruid %d charge %jx",
4311				    entry->object.vm_object->cred->cr_ruid,
4312				    (uintmax_t)entry->object.vm_object->charge);
4313			if (entry->eflags & MAP_ENTRY_COW)
4314				db_printf(", copy (%s)",
4315				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4316			db_printf("\n");
4317
4318			if ((entry->prev == &map->header) ||
4319			    (entry->prev->object.vm_object !=
4320			    entry->object.vm_object)) {
4321				db_indent += 2;
4322				vm_object_print((db_expr_t)(intptr_t)
4323				    entry->object.vm_object,
4324				    0, 0, (char *)0);
4325				db_indent -= 2;
4326			}
4327		}
4328	}
4329	db_indent -= 2;
4330 }
4331
4332 DB_SHOW_COMMAND(map, map)
4333 {
4334
4335	if (!have_addr) {
4336		db_printf("usage: show map <addr>\n");
4337		return;
4338	}
4339	vm_map_print((vm_map_t)addr);
4340 }
4341
4342 DB_SHOW_COMMAND(procvm, procvm)
4343 {
4344	struct proc *p;
4345
4346	if (have_addr) {
4347		p = db_lookup_proc(addr);
4348	} else {
4349		p = curproc;
4350	}
4351
4352	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4353	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4354	    (void *)vmspace_pmap(p->p_vmspace));
4355
4356	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4357 }
4358
4359 #endif /* DDB */
4360
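/*
 * Example usage of the commands above from the ddb(4) prompt (a sketch;
 * the addresses are placeholders, and the output shapes follow the
 * db_printf() formats in vm_map_print() and DB_SHOW_COMMAND(procvm)):
 *
 *	db> show procvm
 *	p = 0x..., vmspace = 0x..., map = 0x..., pmap = 0x...
 *	db> show map 0x...
 *	Task map 0x...: pmap=0x..., nentries=..., version=...
 */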