1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 35 * 36 * 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * Virtual memory mapping module. 
65 */ 66 67 #include <sys/cdefs.h> 68 __FBSDID("$FreeBSD$"); 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/kernel.h> 73 #include <sys/ktr.h> 74 #include <sys/lock.h> 75 #include <sys/mutex.h> 76 #include <sys/proc.h> 77 #include <sys/vmmeter.h> 78 #include <sys/mman.h> 79 #include <sys/vnode.h> 80 #include <sys/racct.h> 81 #include <sys/resourcevar.h> 82 #include <sys/rwlock.h> 83 #include <sys/file.h> 84 #include <sys/sysctl.h> 85 #include <sys/sysent.h> 86 #include <sys/shm.h> 87 88 #include <vm/vm.h> 89 #include <vm/vm_param.h> 90 #include <vm/pmap.h> 91 #include <vm/vm_map.h> 92 #include <vm/vm_page.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_pager.h> 95 #include <vm/vm_kern.h> 96 #include <vm/vm_extern.h> 97 #include <vm/vnode_pager.h> 98 #include <vm/swap_pager.h> 99 #include <vm/uma.h> 100 101 /* 102 * Virtual memory maps provide for the mapping, protection, 103 * and sharing of virtual memory objects. In addition, 104 * this module provides for an efficient virtual copy of 105 * memory from one map to another. 106 * 107 * Synchronization is required prior to most operations. 108 * 109 * Maps consist of an ordered doubly-linked list of simple 110 * entries; a self-adjusting binary search tree of these 111 * entries is used to speed up lookups. 112 * 113 * Since portions of maps are specified by start/end addresses, 114 * which may not align with existing map entries, all 115 * routines merely "clip" entries to these start/end values. 116 * [That is, an entry is split into two, bordering at a 117 * start or end value.] Note that these clippings may not 118 * always be necessary (as the two resulting entries are then 119 * not changed); however, the clipping is done for convenience. 120 * 121 * As mentioned above, virtual copy operations are performed 122 * by copying VM object references from one map to 123 * another, and then marking both regions as copy-on-write. 
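 *
 * For example (an illustrative sketch of the clipping described
 * above): if an entry spans [0x1000, 0x5000) and an operation targets
 * [0x2000, 0x4000), the entry is first clipped at 0x2000 and then at
 * 0x4000, leaving three entries [0x1000, 0x2000), [0x2000, 0x4000)
 * and [0x4000, 0x5000); only the middle entry is then modified.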
124 */ 125 126 static struct mtx map_sleep_mtx; 127 static uma_zone_t mapentzone; 128 static uma_zone_t kmapentzone; 129 static uma_zone_t mapzone; 130 static uma_zone_t vmspace_zone; 131 static int vmspace_zinit(void *mem, int size, int flags); 132 static int vm_map_zinit(void *mem, int size, int flags); 133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, 134 vm_offset_t max); 135 static int vm_map_alignspace(vm_map_t map, vm_object_t object, 136 vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length, 137 vm_offset_t max_addr, vm_offset_t alignment); 138 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); 139 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); 140 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry); 141 static int vm_map_growstack(vm_map_t map, vm_offset_t addr, 142 vm_map_entry_t gap_entry); 143 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 144 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); 145 #ifdef INVARIANTS 146 static void vm_map_zdtor(void *mem, int size, void *arg); 147 static void vmspace_zdtor(void *mem, int size, void *arg); 148 #endif 149 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, 150 vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max, 151 int cow); 152 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 153 vm_offset_t failed_addr); 154 155 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \ 156 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \ 157 !((e)->eflags & MAP_ENTRY_NEEDS_COPY))) 158 159 /* 160 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type 161 * stable. 162 */ 163 #define PROC_VMSPACE_LOCK(p) do { } while (0) 164 #define PROC_VMSPACE_UNLOCK(p) do { } while (0) 165 166 /* 167 * VM_MAP_RANGE_CHECK: [ internal use only ] 168 * 169 * Asserts that the starting and ending region 170 * addresses fall within the valid range of the map. 171 */ 172 #define VM_MAP_RANGE_CHECK(map, start, end) \ 173 { \ 174 if (start < vm_map_min(map)) \ 175 start = vm_map_min(map); \ 176 if (end > vm_map_max(map)) \ 177 end = vm_map_max(map); \ 178 if (start > end) \ 179 start = end; \ 180 } 181 182 /* 183 * vm_map_startup: 184 * 185 * Initialize the vm_map module. Must be called before 186 * any other vm_map routines. 187 * 188 * Map and entry structures are allocated from the general 189 * purpose memory pool with some exceptions: 190 * 191 * - The kernel map and kmem submap are allocated statically. 192 * - Kernel map entries are allocated out of a static pool. 193 * 194 * These restrictions are necessary since malloc() uses the 195 * maps and requires map entries.
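 *
 * (Put another way: allocating a regular map entry can call into the
 * kernel allocator, which may in turn need to grow a kernel map and
 * hence allocate a kernel map entry; the static pool for kernel map
 * entries breaks that bootstrap cycle.)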
196 */ 197 198 void 199 vm_map_startup(void) 200 { 201 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 202 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, 203 #ifdef INVARIANTS 204 vm_map_zdtor, 205 #else 206 NULL, 207 #endif 208 vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 209 uma_prealloc(mapzone, MAX_KMAP); 210 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 211 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 212 UMA_ZONE_MTXCLASS | UMA_ZONE_VM); 213 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 214 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 215 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 216 #ifdef INVARIANTS 217 vmspace_zdtor, 218 #else 219 NULL, 220 #endif 221 vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 222 } 223 224 static int 225 vmspace_zinit(void *mem, int size, int flags) 226 { 227 struct vmspace *vm; 228 229 vm = (struct vmspace *)mem; 230 231 vm->vm_map.pmap = NULL; 232 (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); 233 PMAP_LOCK_INIT(vmspace_pmap(vm)); 234 return (0); 235 } 236 237 static int 238 vm_map_zinit(void *mem, int size, int flags) 239 { 240 vm_map_t map; 241 242 map = (vm_map_t)mem; 243 memset(map, 0, sizeof(*map)); 244 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK); 245 sx_init(&map->lock, "vm map (user)"); 246 return (0); 247 } 248 249 #ifdef INVARIANTS 250 static void 251 vmspace_zdtor(void *mem, int size, void *arg) 252 { 253 struct vmspace *vm; 254 255 vm = (struct vmspace *)mem; 256 257 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); 258 } 259 static void 260 vm_map_zdtor(void *mem, int size, void *arg) 261 { 262 vm_map_t map; 263 264 map = (vm_map_t)mem; 265 KASSERT(map->nentries == 0, 266 ("map %p nentries == %d on free.", 267 map, map->nentries)); 268 KASSERT(map->size == 0, 269 ("map %p size == %lu on free.", 270 map, (unsigned long)map->size)); 271 } 272 #endif /* INVARIANTS */ 273 274 /* 275 * Allocate a vmspace structure, including a vm_map and pmap, 276 * and initialize those structures. The refcnt is set to 1. 277 * 278 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit(). 279 */ 280 struct vmspace * 281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) 282 { 283 struct vmspace *vm; 284 285 vm = uma_zalloc(vmspace_zone, M_WAITOK); 286 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); 287 if (!pinit(vmspace_pmap(vm))) { 288 uma_zfree(vmspace_zone, vm); 289 return (NULL); 290 } 291 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 292 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); 293 vm->vm_refcnt = 1; 294 vm->vm_shm = NULL; 295 vm->vm_swrss = 0; 296 vm->vm_tsize = 0; 297 vm->vm_dsize = 0; 298 vm->vm_ssize = 0; 299 vm->vm_taddr = 0; 300 vm->vm_daddr = 0; 301 vm->vm_maxsaddr = 0; 302 return (vm); 303 } 304 305 #ifdef RACCT 306 static void 307 vmspace_container_reset(struct proc *p) 308 { 309 310 PROC_LOCK(p); 311 racct_set(p, RACCT_DATA, 0); 312 racct_set(p, RACCT_STACK, 0); 313 racct_set(p, RACCT_RSS, 0); 314 racct_set(p, RACCT_MEMLOCK, 0); 315 racct_set(p, RACCT_VMEM, 0); 316 PROC_UNLOCK(p); 317 } 318 #endif 319 320 static inline void 321 vmspace_dofree(struct vmspace *vm) 322 { 323 324 CTR1(KTR_VM, "vmspace_free: %p", vm); 325 326 /* 327 * Make sure any SysV shm is freed, it might not have been in 328 * exit1(). 329 */ 330 shmexit(vm); 331 332 /* 333 * Lock the map, to wait out all other references to it. 
334 * Delete all of the mappings and pages they hold, then call 335 * the pmap module to reclaim anything left. 336 */ 337 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), 338 vm_map_max(&vm->vm_map)); 339 340 pmap_release(vmspace_pmap(vm)); 341 vm->vm_map.pmap = NULL; 342 uma_zfree(vmspace_zone, vm); 343 } 344 345 void 346 vmspace_free(struct vmspace *vm) 347 { 348 349 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 350 "vmspace_free() called"); 351 352 if (vm->vm_refcnt == 0) 353 panic("vmspace_free: attempt to free already freed vmspace"); 354 355 if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) 356 vmspace_dofree(vm); 357 } 358 359 void 360 vmspace_exitfree(struct proc *p) 361 { 362 struct vmspace *vm; 363 364 PROC_VMSPACE_LOCK(p); 365 vm = p->p_vmspace; 366 p->p_vmspace = NULL; 367 PROC_VMSPACE_UNLOCK(p); 368 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 369 vmspace_free(vm); 370 } 371 372 void 373 vmspace_exit(struct thread *td) 374 { 375 int refcnt; 376 struct vmspace *vm; 377 struct proc *p; 378 379 /* 380 * Release user portion of address space. 381 * This releases references to vnodes, 382 * which could cause I/O if the file has been unlinked. 383 * Need to do this early enough that we can still sleep. 384 * 385 * The last exiting process to reach this point releases as 386 * much of the environment as it can. vmspace_dofree() is the 387 * slower fallback in case another process had a temporary 388 * reference to the vmspace. 389 */ 390 391 p = td->td_proc; 392 vm = p->p_vmspace; 393 atomic_add_int(&vmspace0.vm_refcnt, 1); 394 refcnt = vm->vm_refcnt; 395 do { 396 if (refcnt > 1 && p->p_vmspace != &vmspace0) { 397 /* Switch now since other proc might free vmspace */ 398 PROC_VMSPACE_LOCK(p); 399 p->p_vmspace = &vmspace0; 400 PROC_VMSPACE_UNLOCK(p); 401 pmap_activate(td); 402 } 403 } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1)); 404 if (refcnt == 1) { 405 if (p->p_vmspace != vm) { 406 /* vmspace not yet freed, switch back */ 407 PROC_VMSPACE_LOCK(p); 408 p->p_vmspace = vm; 409 PROC_VMSPACE_UNLOCK(p); 410 pmap_activate(td); 411 } 412 pmap_remove_pages(vmspace_pmap(vm)); 413 /* Switch now since this proc will free vmspace */ 414 PROC_VMSPACE_LOCK(p); 415 p->p_vmspace = &vmspace0; 416 PROC_VMSPACE_UNLOCK(p); 417 pmap_activate(td); 418 vmspace_dofree(vm); 419 } 420 #ifdef RACCT 421 if (racct_enable) 422 vmspace_container_reset(p); 423 #endif 424 } 425 426 /* Acquire reference to vmspace owned by another process. */ 427 428 struct vmspace * 429 vmspace_acquire_ref(struct proc *p) 430 { 431 struct vmspace *vm; 432 int refcnt; 433 434 PROC_VMSPACE_LOCK(p); 435 vm = p->p_vmspace; 436 if (vm == NULL) { 437 PROC_VMSPACE_UNLOCK(p); 438 return (NULL); 439 } 440 refcnt = vm->vm_refcnt; 441 do { 442 if (refcnt <= 0) { /* Avoid 0->1 transition */ 443 PROC_VMSPACE_UNLOCK(p); 444 return (NULL); 445 } 446 } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1)); 447 if (vm != p->p_vmspace) { 448 PROC_VMSPACE_UNLOCK(p); 449 vmspace_free(vm); 450 return (NULL); 451 } 452 PROC_VMSPACE_UNLOCK(p); 453 return (vm); 454 } 455 456 /* 457 * Switch between vmspaces in an AIO kernel process. 458 * 459 * The AIO kernel processes switch to and from a user process's 460 * vmspace while performing an I/O operation on behalf of a user 461 * process. The new vmspace is either the vmspace of a user process 462 * obtained from an active AIO request or the initial vmspace of the 463 * AIO kernel process (when it is idling). 
Because user processes 464 * will block to drain any active AIO requests before proceeding in 465 * exit() or execve(), the vmspace reference count for these vmspaces 466 * can never be 0. This allows for a much simpler implementation than 467 * the loop in vmspace_acquire_ref() above. Similarly, AIO kernel 468 * processes hold an extra reference on their initial vmspace for the 469 * life of the process so that this guarantee is true for any vmspace 470 * passed as 'newvm'. 471 */ 472 void 473 vmspace_switch_aio(struct vmspace *newvm) 474 { 475 struct vmspace *oldvm; 476 477 /* XXX: Need some way to assert that this is an aio daemon. */ 478 479 KASSERT(newvm->vm_refcnt > 0, 480 ("vmspace_switch_aio: newvm unreferenced")); 481 482 oldvm = curproc->p_vmspace; 483 if (oldvm == newvm) 484 return; 485 486 /* 487 * Point to the new address space and refer to it. 488 */ 489 curproc->p_vmspace = newvm; 490 atomic_add_int(&newvm->vm_refcnt, 1); 491 492 /* Activate the new mapping. */ 493 pmap_activate(curthread); 494 495 /* Remove the daemon's reference to the old address space. */ 496 KASSERT(oldvm->vm_refcnt > 1, 497 ("vmspace_switch_aio: oldvm dropping last reference")); 498 vmspace_free(oldvm); 499 } 500 501 void 502 _vm_map_lock(vm_map_t map, const char *file, int line) 503 { 504 505 if (map->system_map) 506 mtx_lock_flags_(&map->system_mtx, 0, file, line); 507 else 508 sx_xlock_(&map->lock, file, line); 509 map->timestamp++; 510 } 511 512 static void 513 vm_map_process_deferred(void) 514 { 515 struct thread *td; 516 vm_map_entry_t entry, next; 517 vm_object_t object; 518 519 td = curthread; 520 entry = td->td_map_def_user; 521 td->td_map_def_user = NULL; 522 while (entry != NULL) { 523 next = entry->next; 524 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) { 525 /* 526 * Decrement the object's writemappings and 527 * possibly the vnode's v_writecount. 528 */ 529 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 530 ("Submap with writecount")); 531 object = entry->object.vm_object; 532 KASSERT(object != NULL, ("No object for writecount")); 533 vnode_pager_release_writecount(object, entry->start, 534 entry->end); 535 } 536 vm_map_entry_deallocate(entry, FALSE); 537 entry = next; 538 } 539 } 540 541 void 542 _vm_map_unlock(vm_map_t map, const char *file, int line) 543 { 544 545 if (map->system_map) 546 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 547 else { 548 sx_xunlock_(&map->lock, file, line); 549 vm_map_process_deferred(); 550 } 551 } 552 553 void 554 _vm_map_lock_read(vm_map_t map, const char *file, int line) 555 { 556 557 if (map->system_map) 558 mtx_lock_flags_(&map->system_mtx, 0, file, line); 559 else 560 sx_slock_(&map->lock, file, line); 561 } 562 563 void 564 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 565 { 566 567 if (map->system_map) 568 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 569 else { 570 sx_sunlock_(&map->lock, file, line); 571 vm_map_process_deferred(); 572 } 573 } 574 575 int 576 _vm_map_trylock(vm_map_t map, const char *file, int line) 577 { 578 int error; 579 580 error = map->system_map ? 581 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 582 !sx_try_xlock_(&map->lock, file, line); 583 if (error == 0) 584 map->timestamp++; 585 return (error == 0); 586 } 587 588 int 589 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 590 { 591 int error; 592 593 error = map->system_map ? 
594 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 595 !sx_try_slock_(&map->lock, file, line); 596 return (error == 0); 597 } 598 599 /* 600 * _vm_map_lock_upgrade: [ internal use only ] 601 * 602 * Tries to upgrade a read (shared) lock on the specified map to a write 603 * (exclusive) lock. Returns the value "0" if the upgrade succeeds and a 604 * non-zero value if the upgrade fails. If the upgrade fails, the map is 605 * returned without a read or write lock held. 606 * 607 * Requires that the map be read locked. 608 */ 609 int 610 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 611 { 612 unsigned int last_timestamp; 613 614 if (map->system_map) { 615 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 616 } else { 617 if (!sx_try_upgrade_(&map->lock, file, line)) { 618 last_timestamp = map->timestamp; 619 sx_sunlock_(&map->lock, file, line); 620 vm_map_process_deferred(); 621 /* 622 * If the map's timestamp does not change while the 623 * map is unlocked, then the upgrade succeeds. 624 */ 625 sx_xlock_(&map->lock, file, line); 626 if (last_timestamp != map->timestamp) { 627 sx_xunlock_(&map->lock, file, line); 628 return (1); 629 } 630 } 631 } 632 map->timestamp++; 633 return (0); 634 } 635 636 void 637 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 638 { 639 640 if (map->system_map) { 641 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 642 } else 643 sx_downgrade_(&map->lock, file, line); 644 } 645 646 /* 647 * vm_map_locked: 648 * 649 * Returns a non-zero value if the caller holds a write (exclusive) lock 650 * on the specified map and the value "0" otherwise. 651 */ 652 int 653 vm_map_locked(vm_map_t map) 654 { 655 656 if (map->system_map) 657 return (mtx_owned(&map->system_mtx)); 658 else 659 return (sx_xlocked(&map->lock)); 660 } 661 662 #ifdef INVARIANTS 663 static void 664 _vm_map_assert_locked(vm_map_t map, const char *file, int line) 665 { 666 667 if (map->system_map) 668 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 669 else 670 sx_assert_(&map->lock, SA_XLOCKED, file, line); 671 } 672 673 #define VM_MAP_ASSERT_LOCKED(map) \ 674 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) 675 #else 676 #define VM_MAP_ASSERT_LOCKED(map) 677 #endif 678 679 /* 680 * _vm_map_unlock_and_wait: 681 * 682 * Atomically releases the lock on the specified map and puts the calling 683 * thread to sleep. The calling thread will remain asleep until either 684 * vm_map_wakeup() is performed on the map or the specified timeout is 685 * exceeded. 686 * 687 * WARNING! This function does not perform deferred deallocations of 688 * objects and map entries. Therefore, the calling thread is expected to 689 * reacquire the map lock after reawakening and later perform an ordinary 690 * unlock operation, such as vm_map_unlock(), before completing its 691 * operation on the map. 692 */ 693 int 694 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) 695 { 696 697 mtx_lock(&map_sleep_mtx); 698 if (map->system_map) 699 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 700 else 701 sx_xunlock_(&map->lock, file, line); 702 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 703 timo)); 704 } 705 706 /* 707 * vm_map_wakeup: 708 * 709 * Awaken any threads that have slept on the map using 710 * vm_map_unlock_and_wait(). 
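 *
 * A minimal sketch of the intended pairing (details vary by caller;
 * this is illustrative, not a complete waiter):
 *
 *	waiter:
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);
 *		(re-check the awaited condition, then unlock normally)
 *	updater:
 *		(modify the map)
 *		vm_map_unlock(map);
 *		vm_map_wakeup(map);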
711 */ 712 void 713 vm_map_wakeup(vm_map_t map) 714 { 715 716 /* 717 * Acquire and release map_sleep_mtx to prevent a wakeup() 718 * from being performed (and lost) between the map unlock 719 * and the msleep() in _vm_map_unlock_and_wait(). 720 */ 721 mtx_lock(&map_sleep_mtx); 722 mtx_unlock(&map_sleep_mtx); 723 wakeup(&map->root); 724 } 725 726 void 727 vm_map_busy(vm_map_t map) 728 { 729 730 VM_MAP_ASSERT_LOCKED(map); 731 map->busy++; 732 } 733 734 void 735 vm_map_unbusy(vm_map_t map) 736 { 737 738 VM_MAP_ASSERT_LOCKED(map); 739 KASSERT(map->busy, ("vm_map_unbusy: not busy")); 740 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { 741 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); 742 wakeup(&map->busy); 743 } 744 } 745 746 void 747 vm_map_wait_busy(vm_map_t map) 748 { 749 750 VM_MAP_ASSERT_LOCKED(map); 751 while (map->busy) { 752 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); 753 if (map->system_map) 754 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); 755 else 756 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); 757 } 758 map->timestamp++; 759 } 760 761 long 762 vmspace_resident_count(struct vmspace *vmspace) 763 { 764 return pmap_resident_count(vmspace_pmap(vmspace)); 765 } 766 767 /* 768 * vm_map_create: 769 * 770 * Creates and returns a new empty VM map with 771 * the given physical map structure, and having 772 * the given lower and upper address bounds. 773 */ 774 vm_map_t 775 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 776 { 777 vm_map_t result; 778 779 result = uma_zalloc(mapzone, M_WAITOK); 780 CTR1(KTR_VM, "vm_map_create: %p", result); 781 _vm_map_init(result, pmap, min, max); 782 return (result); 783 } 784 785 /* 786 * Initialize an existing vm_map structure 787 * such as that in the vmspace structure. 788 */ 789 static void 790 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 791 { 792 793 map->header.next = map->header.prev = &map->header; 794 map->header.eflags = MAP_ENTRY_HEADER; 795 map->needs_wakeup = FALSE; 796 map->system_map = 0; 797 map->pmap = pmap; 798 map->header.end = min; 799 map->header.start = max; 800 map->flags = 0; 801 map->root = NULL; 802 map->timestamp = 0; 803 map->busy = 0; 804 } 805 806 void 807 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 808 { 809 810 _vm_map_init(map, pmap, min, max); 811 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 812 sx_init(&map->lock, "user map"); 813 } 814 815 /* 816 * vm_map_entry_dispose: [ internal use only ] 817 * 818 * Inverse of vm_map_entry_create. 819 */ 820 static void 821 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 822 { 823 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 824 } 825 826 /* 827 * vm_map_entry_create: [ internal use only ] 828 * 829 * Allocates a VM map entry for insertion. 830 * No entry fields are filled in. 831 */ 832 static vm_map_entry_t 833 vm_map_entry_create(vm_map_t map) 834 { 835 vm_map_entry_t new_entry; 836 837 if (map->system_map) 838 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 839 else 840 new_entry = uma_zalloc(mapentzone, M_WAITOK); 841 if (new_entry == NULL) 842 panic("vm_map_entry_create: kernel resources exhausted"); 843 return (new_entry); 844 } 845 846 /* 847 * vm_map_entry_set_behavior: 848 * 849 * Set the expected access behavior, either normal, random, or 850 * sequential. 
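 *
 * (The value is stored in the MAP_ENTRY_BEHAV_* bits of the entry's
 * eflags; the usual caller is vm_map_madvise(), applying MADV_NORMAL,
 * MADV_RANDOM and MADV_SEQUENTIAL.)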
851 */ 852 static inline void 853 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 854 { 855 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 856 (behavior & MAP_ENTRY_BEHAV_MASK); 857 } 858 859 /* 860 * vm_map_entry_set_max_free: 861 * 862 * Set the max_free field in a vm_map_entry. 863 */ 864 static inline void 865 vm_map_entry_set_max_free(vm_map_entry_t entry) 866 { 867 868 entry->max_free = entry->adj_free; 869 if (entry->left != NULL && entry->left->max_free > entry->max_free) 870 entry->max_free = entry->left->max_free; 871 if (entry->right != NULL && entry->right->max_free > entry->max_free) 872 entry->max_free = entry->right->max_free; 873 } 874 875 /* 876 * vm_map_entry_splay: 877 * 878 * The Sleator and Tarjan top-down splay algorithm with the 879 * following variation. Max_free must be computed bottom-up, so 880 * on the downward pass, maintain the left and right spines in 881 * reverse order. Then, make a second pass up each side to fix 882 * the pointers and compute max_free. The time bound is O(log n) 883 * amortized. 884 * 885 * The new root is the vm_map_entry containing "addr", or else an 886 * adjacent entry (lower or higher) if addr is not in the tree. 887 * 888 * The map must be locked, and leaves it so. 889 * 890 * Returns: the new root. 891 */ 892 static vm_map_entry_t 893 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root) 894 { 895 vm_map_entry_t llist, rlist; 896 vm_map_entry_t ltree, rtree; 897 vm_map_entry_t y; 898 899 /* Special case of empty tree. */ 900 if (root == NULL) 901 return (root); 902 903 /* 904 * Pass One: Splay down the tree until we find addr or a NULL 905 * pointer where addr would go. llist and rlist are the two 906 * sides in reverse order (bottom-up), with llist linked by 907 * the right pointer and rlist linked by the left pointer in 908 * the vm_map_entry. Wait until Pass Two to set max_free on 909 * the two spines. 910 */ 911 llist = NULL; 912 rlist = NULL; 913 for (;;) { 914 /* root is never NULL in here. */ 915 if (addr < root->start) { 916 y = root->left; 917 if (y == NULL) 918 break; 919 if (addr < y->start && y->left != NULL) { 920 /* Rotate right and put y on rlist. */ 921 root->left = y->right; 922 y->right = root; 923 vm_map_entry_set_max_free(root); 924 root = y->left; 925 y->left = rlist; 926 rlist = y; 927 } else { 928 /* Put root on rlist. */ 929 root->left = rlist; 930 rlist = root; 931 root = y; 932 } 933 } else if (addr >= root->end) { 934 y = root->right; 935 if (y == NULL) 936 break; 937 if (addr >= y->end && y->right != NULL) { 938 /* Rotate left and put y on llist. */ 939 root->right = y->left; 940 y->left = root; 941 vm_map_entry_set_max_free(root); 942 root = y->right; 943 y->right = llist; 944 llist = y; 945 } else { 946 /* Put root on llist. */ 947 root->right = llist; 948 llist = root; 949 root = y; 950 } 951 } else 952 break; 953 } 954 955 /* 956 * Pass Two: Walk back up the two spines, flip the pointers 957 * and set max_free. The subtrees of the root go at the 958 * bottom of llist and rlist. 959 */ 960 ltree = root->left; 961 while (llist != NULL) { 962 y = llist->right; 963 llist->right = ltree; 964 vm_map_entry_set_max_free(llist); 965 ltree = llist; 966 llist = y; 967 } 968 rtree = root->right; 969 while (rlist != NULL) { 970 y = rlist->left; 971 rlist->left = rtree; 972 vm_map_entry_set_max_free(rlist); 973 rtree = rlist; 974 rlist = y; 975 } 976 977 /* 978 * Final assembly: add ltree and rtree as subtrees of root. 
979 */ 980 root->left = ltree; 981 root->right = rtree; 982 vm_map_entry_set_max_free(root); 983 984 return (root); 985 } 986 987 /* 988 * vm_map_entry_{un,}link: 989 * 990 * Insert/remove entries from maps. 991 */ 992 static void 993 vm_map_entry_link(vm_map_t map, 994 vm_map_entry_t after_where, 995 vm_map_entry_t entry) 996 { 997 998 CTR4(KTR_VM, 999 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 1000 map->nentries, entry, after_where); 1001 VM_MAP_ASSERT_LOCKED(map); 1002 KASSERT(after_where->end <= entry->start, 1003 ("vm_map_entry_link: prev end %jx new start %jx overlap", 1004 (uintmax_t)after_where->end, (uintmax_t)entry->start)); 1005 KASSERT(entry->end <= after_where->next->start, 1006 ("vm_map_entry_link: new end %jx next start %jx overlap", 1007 (uintmax_t)entry->end, (uintmax_t)after_where->next->start)); 1008 1009 map->nentries++; 1010 entry->prev = after_where; 1011 entry->next = after_where->next; 1012 entry->next->prev = entry; 1013 after_where->next = entry; 1014 1015 if (after_where != &map->header) { 1016 if (after_where != map->root) 1017 vm_map_entry_splay(after_where->start, map->root); 1018 entry->right = after_where->right; 1019 entry->left = after_where; 1020 after_where->right = NULL; 1021 after_where->adj_free = entry->start - after_where->end; 1022 vm_map_entry_set_max_free(after_where); 1023 } else { 1024 entry->right = map->root; 1025 entry->left = NULL; 1026 } 1027 entry->adj_free = entry->next->start - entry->end; 1028 vm_map_entry_set_max_free(entry); 1029 map->root = entry; 1030 } 1031 1032 static void 1033 vm_map_entry_unlink(vm_map_t map, 1034 vm_map_entry_t entry) 1035 { 1036 vm_map_entry_t next, prev, root; 1037 1038 VM_MAP_ASSERT_LOCKED(map); 1039 if (entry != map->root) 1040 vm_map_entry_splay(entry->start, map->root); 1041 if (entry->left == NULL) 1042 root = entry->right; 1043 else { 1044 root = vm_map_entry_splay(entry->start, entry->left); 1045 root->right = entry->right; 1046 root->adj_free = entry->next->start - root->end; 1047 vm_map_entry_set_max_free(root); 1048 } 1049 map->root = root; 1050 1051 prev = entry->prev; 1052 next = entry->next; 1053 next->prev = prev; 1054 prev->next = next; 1055 map->nentries--; 1056 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 1057 map->nentries, entry); 1058 } 1059 1060 /* 1061 * vm_map_entry_resize_free: 1062 * 1063 * Recompute the amount of free space following a vm_map_entry 1064 * and propagate that value up the tree. Call this function after 1065 * resizing a map entry in-place, that is, without a call to 1066 * vm_map_entry_link() or _unlink(). 1067 * 1068 * The map must be locked, and leaves it so. 1069 */ 1070 static void 1071 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry) 1072 { 1073 1074 /* 1075 * Using splay trees without parent pointers, propagating 1076 * max_free up the tree is done by moving the entry to the 1077 * root and making the change there. 1078 */ 1079 if (entry != map->root) 1080 map->root = vm_map_entry_splay(entry->start, map->root); 1081 1082 entry->adj_free = entry->next->start - entry->end; 1083 vm_map_entry_set_max_free(entry); 1084 } 1085 1086 /* 1087 * vm_map_lookup_entry: [ internal use only ] 1088 * 1089 * Finds the map entry containing (or 1090 * immediately preceding) the specified address 1091 * in the given map; the entry is returned 1092 * in the "entry" parameter. The boolean 1093 * result indicates whether the address is 1094 * actually contained in the map. 
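 *
 * A minimal sketch of the usual calling pattern, mirroring callers
 * such as vm_map_insert() and vm_map_submap() below:
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *
 * In the first case "start" lies within "entry"; in the second,
 * "entry" precedes "start" and entry->next is the first entry at or
 * after it.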
1095 */ 1096 boolean_t 1097 vm_map_lookup_entry( 1098 vm_map_t map, 1099 vm_offset_t address, 1100 vm_map_entry_t *entry) /* OUT */ 1101 { 1102 vm_map_entry_t cur; 1103 boolean_t locked; 1104 1105 /* 1106 * If the map is empty, then the map entry immediately preceding 1107 * "address" is the map's header. 1108 */ 1109 cur = map->root; 1110 if (cur == NULL) 1111 *entry = &map->header; 1112 else if (address >= cur->start && cur->end > address) { 1113 *entry = cur; 1114 return (TRUE); 1115 } else if ((locked = vm_map_locked(map)) || 1116 sx_try_upgrade(&map->lock)) { 1117 /* 1118 * Splay requires a write lock on the map. However, it only 1119 * restructures the binary search tree; it does not otherwise 1120 * change the map. Thus, the map's timestamp need not change 1121 * on a temporary upgrade. 1122 */ 1123 map->root = cur = vm_map_entry_splay(address, cur); 1124 if (!locked) 1125 sx_downgrade(&map->lock); 1126 1127 /* 1128 * If "address" is contained within a map entry, the new root 1129 * is that map entry. Otherwise, the new root is a map entry 1130 * immediately before or after "address". 1131 */ 1132 if (address >= cur->start) { 1133 *entry = cur; 1134 if (cur->end > address) 1135 return (TRUE); 1136 } else 1137 *entry = cur->prev; 1138 } else 1139 /* 1140 * Since the map is only locked for read access, perform a 1141 * standard binary search tree lookup for "address". 1142 */ 1143 for (;;) { 1144 if (address < cur->start) { 1145 if (cur->left == NULL) { 1146 *entry = cur->prev; 1147 break; 1148 } 1149 cur = cur->left; 1150 } else if (cur->end > address) { 1151 *entry = cur; 1152 return (TRUE); 1153 } else { 1154 if (cur->right == NULL) { 1155 *entry = cur; 1156 break; 1157 } 1158 cur = cur->right; 1159 } 1160 } 1161 return (FALSE); 1162 } 1163 1164 /* 1165 * vm_map_insert: 1166 * 1167 * Inserts the given whole VM object into the target 1168 * map at the specified address range. The object's 1169 * size should match that of the address range. 1170 * 1171 * Requires that the map be locked, and leaves it so. 1172 * 1173 * If object is non-NULL, ref count must be bumped by caller 1174 * prior to making call to account for the new entry. 1175 */ 1176 int 1177 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1178 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1179 { 1180 vm_map_entry_t new_entry, prev_entry, temp_entry; 1181 struct ucred *cred; 1182 vm_eflags_t protoeflags; 1183 vm_inherit_t inheritance; 1184 1185 VM_MAP_ASSERT_LOCKED(map); 1186 KASSERT(object != kernel_object || 1187 (cow & MAP_COPY_ON_WRITE) == 0, 1188 ("vm_map_insert: kernel object and COW")); 1189 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0, 1190 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1191 KASSERT((prot & ~max) == 0, 1192 ("prot %#x is not subset of max_prot %#x", prot, max)); 1193 1194 /* 1195 * Check that the start and end points are not bogus. 1196 */ 1197 if (start < vm_map_min(map) || end > vm_map_max(map) || 1198 start >= end) 1199 return (KERN_INVALID_ADDRESS); 1200 1201 /* 1202 * Find the entry prior to the proposed starting address; if it's part 1203 * of an existing entry, this range is bogus. 1204 */ 1205 if (vm_map_lookup_entry(map, start, &temp_entry)) 1206 return (KERN_NO_SPACE); 1207 1208 prev_entry = temp_entry; 1209 1210 /* 1211 * Assert that the next entry doesn't overlap the end point. 
1212 */ 1213 if (prev_entry->next->start < end) 1214 return (KERN_NO_SPACE); 1215 1216 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || 1217 max != VM_PROT_NONE)) 1218 return (KERN_INVALID_ARGUMENT); 1219 1220 protoeflags = 0; 1221 if (cow & MAP_COPY_ON_WRITE) 1222 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 1223 if (cow & MAP_NOFAULT) 1224 protoeflags |= MAP_ENTRY_NOFAULT; 1225 if (cow & MAP_DISABLE_SYNCER) 1226 protoeflags |= MAP_ENTRY_NOSYNC; 1227 if (cow & MAP_DISABLE_COREDUMP) 1228 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1229 if (cow & MAP_STACK_GROWS_DOWN) 1230 protoeflags |= MAP_ENTRY_GROWS_DOWN; 1231 if (cow & MAP_STACK_GROWS_UP) 1232 protoeflags |= MAP_ENTRY_GROWS_UP; 1233 if (cow & MAP_VN_WRITECOUNT) 1234 protoeflags |= MAP_ENTRY_VN_WRITECNT; 1235 if ((cow & MAP_CREATE_GUARD) != 0) 1236 protoeflags |= MAP_ENTRY_GUARD; 1237 if ((cow & MAP_CREATE_STACK_GAP_DN) != 0) 1238 protoeflags |= MAP_ENTRY_STACK_GAP_DN; 1239 if ((cow & MAP_CREATE_STACK_GAP_UP) != 0) 1240 protoeflags |= MAP_ENTRY_STACK_GAP_UP; 1241 if (cow & MAP_INHERIT_SHARE) 1242 inheritance = VM_INHERIT_SHARE; 1243 else 1244 inheritance = VM_INHERIT_DEFAULT; 1245 1246 cred = NULL; 1247 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0) 1248 goto charged; 1249 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 1250 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 1251 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 1252 return (KERN_RESOURCE_SHORTAGE); 1253 KASSERT(object == NULL || 1254 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 || 1255 object->cred == NULL, 1256 ("overcommit: vm_map_insert o %p", object)); 1257 cred = curthread->td_ucred; 1258 } 1259 1260 charged: 1261 /* Expand the kernel pmap, if necessary. */ 1262 if (map == kernel_map && end > kernel_vm_end) 1263 pmap_growkernel(end); 1264 if (object != NULL) { 1265 /* 1266 * OBJ_ONEMAPPING must be cleared unless this mapping 1267 * is trivially proven to be the only mapping for any 1268 * of the object's pages. (Object granularity 1269 * reference counting is insufficient to recognize 1270 * aliases with precision.) 1271 */ 1272 VM_OBJECT_WLOCK(object); 1273 if (object->ref_count > 1 || object->shadow_count != 0) 1274 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1275 VM_OBJECT_WUNLOCK(object); 1276 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == 1277 protoeflags && 1278 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 && 1279 prev_entry->end == start && (prev_entry->cred == cred || 1280 (prev_entry->object.vm_object != NULL && 1281 prev_entry->object.vm_object->cred == cred)) && 1282 vm_object_coalesce(prev_entry->object.vm_object, 1283 prev_entry->offset, 1284 (vm_size_t)(prev_entry->end - prev_entry->start), 1285 (vm_size_t)(end - prev_entry->end), cred != NULL && 1286 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 1287 /* 1288 * We were able to extend the object. Determine if we 1289 * can extend the previous map entry to include the 1290 * new range as well. 
1291 */ 1292 if (prev_entry->inheritance == inheritance && 1293 prev_entry->protection == prot && 1294 prev_entry->max_protection == max && 1295 prev_entry->wired_count == 0) { 1296 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == 1297 0, ("prev_entry %p has incoherent wiring", 1298 prev_entry)); 1299 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) 1300 map->size += end - prev_entry->end; 1301 prev_entry->end = end; 1302 vm_map_entry_resize_free(map, prev_entry); 1303 vm_map_simplify_entry(map, prev_entry); 1304 return (KERN_SUCCESS); 1305 } 1306 1307 /* 1308 * If we can extend the object but cannot extend the 1309 * map entry, we have to create a new map entry. We 1310 * must bump the ref count on the extended object to 1311 * account for it. object may be NULL. 1312 */ 1313 object = prev_entry->object.vm_object; 1314 offset = prev_entry->offset + 1315 (prev_entry->end - prev_entry->start); 1316 vm_object_reference(object); 1317 if (cred != NULL && object != NULL && object->cred != NULL && 1318 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 1319 /* Object already accounts for this uid. */ 1320 cred = NULL; 1321 } 1322 } 1323 if (cred != NULL) 1324 crhold(cred); 1325 1326 /* 1327 * Create a new entry 1328 */ 1329 new_entry = vm_map_entry_create(map); 1330 new_entry->start = start; 1331 new_entry->end = end; 1332 new_entry->cred = NULL; 1333 1334 new_entry->eflags = protoeflags; 1335 new_entry->object.vm_object = object; 1336 new_entry->offset = offset; 1337 1338 new_entry->inheritance = inheritance; 1339 new_entry->protection = prot; 1340 new_entry->max_protection = max; 1341 new_entry->wired_count = 0; 1342 new_entry->wiring_thread = NULL; 1343 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 1344 new_entry->next_read = start; 1345 1346 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 1347 ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); 1348 new_entry->cred = cred; 1349 1350 /* 1351 * Insert the new entry into the list 1352 */ 1353 vm_map_entry_link(map, prev_entry, new_entry); 1354 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) 1355 map->size += new_entry->end - new_entry->start; 1356 1357 /* 1358 * Try to coalesce the new entry with both the previous and next 1359 * entries in the list. Previously, we only attempted to coalesce 1360 * with the previous entry when object is NULL. Here, we handle the 1361 * other cases, which are less common. 1362 */ 1363 vm_map_simplify_entry(map, new_entry); 1364 1365 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1366 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1367 end - start, cow & MAP_PREFAULT_PARTIAL); 1368 } 1369 1370 return (KERN_SUCCESS); 1371 } 1372 1373 /* 1374 * vm_map_findspace: 1375 * 1376 * Find the first fit (lowest VM address) for "length" free bytes 1377 * beginning at address >= start in the given map. 1378 * 1379 * In a vm_map_entry, "adj_free" is the amount of free space 1380 * adjacent (higher address) to this entry, and "max_free" is the 1381 * maximum amount of contiguous free space in its subtree. This 1382 * allows finding a free region in one path down the tree, so 1383 * O(log n) amortized with splay trees. 1384 * 1385 * The map must be locked, and leaves it so. 1386 * 1387 * Returns: 0 on success, and starting address in *addr, 1388 * 1 if insufficient space. 
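 *
 * Illustrative example (values invented for this comment): if the
 * splayed root ends at 0x4000 with adj_free == 0x1000, a request for
 * 0x2000 bytes cannot use that gap; the search then descends into the
 * right subtree only if its max_free is at least 0x2000, always
 * preferring the lowest-addressed gap that is large enough.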
1389 */ 1390 int 1391 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1392 vm_offset_t *addr) /* OUT */ 1393 { 1394 vm_map_entry_t entry; 1395 vm_offset_t st; 1396 1397 /* 1398 * Request must fit within min/max VM address and must avoid 1399 * address wrap. 1400 */ 1401 start = MAX(start, vm_map_min(map)); 1402 if (start + length > vm_map_max(map) || start + length < start) 1403 return (1); 1404 1405 /* Empty tree means wide open address space. */ 1406 if (map->root == NULL) { 1407 *addr = start; 1408 return (0); 1409 } 1410 1411 /* 1412 * After splay, if start comes before root node, then there 1413 * must be a gap from start to the root. 1414 */ 1415 map->root = vm_map_entry_splay(start, map->root); 1416 if (start + length <= map->root->start) { 1417 *addr = start; 1418 return (0); 1419 } 1420 1421 /* 1422 * Root is the last node that might begin its gap before 1423 * start, and this is the last comparison where address 1424 * wrap might be a problem. 1425 */ 1426 st = (start > map->root->end) ? start : map->root->end; 1427 if (length <= map->root->end + map->root->adj_free - st) { 1428 *addr = st; 1429 return (0); 1430 } 1431 1432 /* With max_free, can immediately tell if no solution. */ 1433 entry = map->root->right; 1434 if (entry == NULL || length > entry->max_free) 1435 return (1); 1436 1437 /* 1438 * Search the right subtree in the order: left subtree, root, 1439 * right subtree (first fit). The previous splay implies that 1440 * all regions in the right subtree have addresses > start. 1441 */ 1442 while (entry != NULL) { 1443 if (entry->left != NULL && entry->left->max_free >= length) 1444 entry = entry->left; 1445 else if (entry->adj_free >= length) { 1446 *addr = entry->end; 1447 return (0); 1448 } else 1449 entry = entry->right; 1450 } 1451 1452 /* Can't get here, so panic if we do. */ 1453 panic("vm_map_findspace: max_free corrupt"); 1454 } 1455 1456 int 1457 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1458 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1459 vm_prot_t max, int cow) 1460 { 1461 vm_offset_t end; 1462 int result; 1463 1464 end = start + length; 1465 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1466 object == NULL, 1467 ("vm_map_fixed: non-NULL backing object for stack")); 1468 vm_map_lock(map); 1469 VM_MAP_RANGE_CHECK(map, start, end); 1470 if ((cow & MAP_CHECK_EXCL) == 0) 1471 vm_map_delete(map, start, end); 1472 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1473 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1474 prot, max, cow); 1475 } else { 1476 result = vm_map_insert(map, object, offset, start, end, 1477 prot, max, cow); 1478 } 1479 vm_map_unlock(map); 1480 return (result); 1481 } 1482 1483 /* 1484 * Searches for the specified amount of free space in the given map with the 1485 * specified alignment. Performs an address-ordered, first-fit search from 1486 * the given address "*addr", with an optional upper bound "max_addr". If the 1487 * parameter "alignment" is zero, then the alignment is computed from the 1488 * given (object, offset) pair so as to enable the greatest possible use of 1489 * superpage mappings. Returns KERN_SUCCESS and the address of the free space 1490 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE. 1491 * 1492 * The map must be locked. Initially, there must be at least "length" bytes 1493 * of free space at the given address. 
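 *
 * Worked example of the alignment step (values invented for this
 * comment): with alignment == 0x200000 (a 2MB boundary), a candidate
 * address of 0x1234000 is misaligned and is rounded up to
 * (0x1234000 & ~0x1fffff) + 0x200000 == 0x1400000; the loop below
 * then re-verifies that 0x1400000 still has "length" free bytes.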
1494 */ 1495 static int 1496 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1497 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr, 1498 vm_offset_t alignment) 1499 { 1500 vm_offset_t aligned_addr, free_addr; 1501 1502 VM_MAP_ASSERT_LOCKED(map); 1503 free_addr = *addr; 1504 KASSERT(!vm_map_findspace(map, free_addr, length, addr) && 1505 free_addr == *addr, ("caller provided insufficient free space")); 1506 for (;;) { 1507 /* 1508 * At the start of every iteration, the free space at address 1509 * "*addr" is at least "length" bytes. 1510 */ 1511 if (alignment == 0) 1512 pmap_align_superpage(object, offset, addr, length); 1513 else if ((*addr & (alignment - 1)) != 0) { 1514 *addr &= ~(alignment - 1); 1515 *addr += alignment; 1516 } 1517 aligned_addr = *addr; 1518 if (aligned_addr == free_addr) { 1519 /* 1520 * Alignment did not change "*addr", so "*addr" must 1521 * still provide sufficient free space. 1522 */ 1523 return (KERN_SUCCESS); 1524 } 1525 1526 /* 1527 * Test for address wrap on "*addr". A wrapped "*addr" could 1528 * be a valid address, in which case vm_map_findspace() cannot 1529 * be relied upon to fail. 1530 */ 1531 if (aligned_addr < free_addr || 1532 vm_map_findspace(map, aligned_addr, length, addr) || 1533 (max_addr != 0 && *addr + length > max_addr)) 1534 return (KERN_NO_SPACE); 1535 free_addr = *addr; 1536 if (free_addr == aligned_addr) { 1537 /* 1538 * If a successful call to vm_map_findspace() did not 1539 * change "*addr", then "*addr" must still be aligned 1540 * and provide sufficient free space. 1541 */ 1542 return (KERN_SUCCESS); 1543 } 1544 } 1545 } 1546 1547 /* 1548 * vm_map_find finds an unallocated region in the target address 1549 * map with the given length. The search is defined to be 1550 * first-fit from the specified address; the region found is 1551 * returned in the same parameter. 1552 * 1553 * If object is non-NULL, ref count must be bumped by caller 1554 * prior to making call to account for the new entry. 
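 *
 * A hedged caller-side sketch of that reference-count convention
 * (not part of this function; variable names are illustrative):
 *
 *	vm_object_reference(object);
 *	rv = vm_map_find(map, object, offset, &addr, size, 0,
 *	    VMFS_OPTIMAL_SPACE, prot, maxprot, cow);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);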
1555 */ 1556 int 1557 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1558 vm_offset_t *addr, /* IN/OUT */ 1559 vm_size_t length, vm_offset_t max_addr, int find_space, 1560 vm_prot_t prot, vm_prot_t max, int cow) 1561 { 1562 vm_offset_t alignment, min_addr; 1563 int rv; 1564 1565 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1566 object == NULL, 1567 ("vm_map_find: non-NULL backing object for stack")); 1568 MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE && 1569 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)); 1570 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 1571 (object->flags & OBJ_COLORED) == 0)) 1572 find_space = VMFS_ANY_SPACE; 1573 if (find_space >> 8 != 0) { 1574 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 1575 alignment = (vm_offset_t)1 << (find_space >> 8); 1576 } else 1577 alignment = 0; 1578 vm_map_lock(map); 1579 if (find_space != VMFS_NO_SPACE) { 1580 KASSERT(find_space == VMFS_ANY_SPACE || 1581 find_space == VMFS_OPTIMAL_SPACE || 1582 find_space == VMFS_SUPER_SPACE || 1583 alignment != 0, ("unexpected VMFS flag")); 1584 min_addr = *addr; 1585 again: 1586 if (vm_map_findspace(map, min_addr, length, addr) || 1587 (max_addr != 0 && *addr + length > max_addr)) { 1588 rv = KERN_NO_SPACE; 1589 goto done; 1590 } 1591 if (find_space != VMFS_ANY_SPACE && 1592 (rv = vm_map_alignspace(map, object, offset, addr, length, 1593 max_addr, alignment)) != KERN_SUCCESS) { 1594 if (find_space == VMFS_OPTIMAL_SPACE) { 1595 find_space = VMFS_ANY_SPACE; 1596 goto again; 1597 } 1598 goto done; 1599 } 1600 } else if ((cow & MAP_REMAP) != 0) { 1601 if (*addr < vm_map_min(map) || 1602 *addr + length > vm_map_max(map) || 1603 *addr + length <= length) { 1604 rv = KERN_INVALID_ADDRESS; 1605 goto done; 1606 } 1607 vm_map_delete(map, *addr, *addr + length); 1608 } 1609 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1610 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 1611 max, cow); 1612 } else { 1613 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 1614 prot, max, cow); 1615 } 1616 done: 1617 vm_map_unlock(map); 1618 return (rv); 1619 } 1620 1621 /* 1622 * vm_map_find_min() is a variant of vm_map_find() that takes an 1623 * additional parameter (min_addr) and treats the given address 1624 * (*addr) differently. Specifically, it treats *addr as a hint 1625 * and not as the minimum address where the mapping is created. 1626 * 1627 * This function works in two phases. First, it tries to 1628 * allocate above the hint. If that fails and the hint is 1629 * greater than min_addr, it performs a second pass, replacing 1630 * the hint with min_addr as the minimum address for the 1631 * allocation. 1632 */ 1633 int 1634 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1635 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr, 1636 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 1637 int cow) 1638 { 1639 vm_offset_t hint; 1640 int rv; 1641 1642 hint = *addr; 1643 for (;;) { 1644 rv = vm_map_find(map, object, offset, addr, length, max_addr, 1645 find_space, prot, max, cow); 1646 if (rv == KERN_SUCCESS || min_addr >= hint) 1647 return (rv); 1648 *addr = hint = min_addr; 1649 } 1650 } 1651 1652 /* 1653 * A map entry with any of the following flags set must not be merged with 1654 * another entry. 
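 * Such entries (growable stack segments, entries being wired or
 * unwired by another thread, and submap entries) carry per-entry
 * state that a merge would discard.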
1655 */ 1656 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \ 1657 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP) 1658 1659 static bool 1660 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 1661 { 1662 1663 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 1664 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 1665 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 1666 prev, entry)); 1667 return (prev->end == entry->start && 1668 prev->object.vm_object == entry->object.vm_object && 1669 (prev->object.vm_object == NULL || 1670 prev->offset + (prev->end - prev->start) == entry->offset) && 1671 prev->eflags == entry->eflags && 1672 prev->protection == entry->protection && 1673 prev->max_protection == entry->max_protection && 1674 prev->inheritance == entry->inheritance && 1675 prev->wired_count == entry->wired_count && 1676 prev->cred == entry->cred); 1677 } 1678 1679 static void 1680 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 1681 { 1682 1683 /* 1684 * If the backing object is a vnode object, vm_object_deallocate() 1685 * calls vrele(). However, vrele() does not lock the vnode because 1686 * the vnode has additional references. Thus, the map lock can be 1687 * kept without causing a lock-order reversal with the vnode lock. 1688 * 1689 * Since we count the number of virtual page mappings in 1690 * object->un_pager.vnp.writemappings, the writemappings value 1691 * should not be adjusted when the entry is disposed of. 1692 */ 1693 if (entry->object.vm_object != NULL) 1694 vm_object_deallocate(entry->object.vm_object); 1695 if (entry->cred != NULL) 1696 crfree(entry->cred); 1697 vm_map_entry_dispose(map, entry); 1698 } 1699 1700 /* 1701 * vm_map_simplify_entry: 1702 * 1703 * Simplify the given map entry by merging with either neighbor. This 1704 * routine also has the ability to merge with both neighbors. 1705 * 1706 * The map must be locked. 1707 * 1708 * This routine guarantees that the passed entry remains valid (though 1709 * possibly extended). When merging, this routine may delete one or 1710 * both neighbors. 1711 */ 1712 void 1713 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1714 { 1715 vm_map_entry_t next, prev; 1716 1717 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0) 1718 return; 1719 prev = entry->prev; 1720 if (vm_map_mergeable_neighbors(prev, entry)) { 1721 vm_map_entry_unlink(map, prev); 1722 entry->start = prev->start; 1723 entry->offset = prev->offset; 1724 if (entry->prev != &map->header) 1725 vm_map_entry_resize_free(map, entry->prev); 1726 vm_map_merged_neighbor_dispose(map, prev); 1727 } 1728 next = entry->next; 1729 if (vm_map_mergeable_neighbors(entry, next)) { 1730 vm_map_entry_unlink(map, next); 1731 entry->end = next->end; 1732 vm_map_entry_resize_free(map, entry); 1733 vm_map_merged_neighbor_dispose(map, next); 1734 } 1735 } 1736 1737 /* 1738 * vm_map_clip_start: [ internal use only ] 1739 * 1740 * Asserts that the given entry begins at or after 1741 * the specified address; if necessary, 1742 * it splits the entry into two. 1743 */ 1744 #define vm_map_clip_start(map, entry, startaddr) \ 1745 { \ 1746 if (startaddr > entry->start) \ 1747 _vm_map_clip_start(map, entry, startaddr); \ 1748 } 1749 1750 /* 1751 * This routine is called only when it is known that 1752 * the entry must be split. 
1753 */ 1754 static void 1755 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1756 { 1757 vm_map_entry_t new_entry; 1758 1759 VM_MAP_ASSERT_LOCKED(map); 1760 KASSERT(entry->end > start && entry->start < start, 1761 ("_vm_map_clip_start: invalid clip of entry %p", entry)); 1762 1763 /* 1764 * Split off the front portion -- note that we must insert the new 1765 * entry BEFORE this one, so that this entry has the specified 1766 * starting address. 1767 */ 1768 vm_map_simplify_entry(map, entry); 1769 1770 /* 1771 * If there is no object backing this entry, we might as well create 1772 * one now. If we defer it, an object can get created after the map 1773 * is clipped, and individual objects will be created for the split-up 1774 * map. This is a bit of a hack, but is also about the best place to 1775 * put this improvement. 1776 */ 1777 if (entry->object.vm_object == NULL && !map->system_map && 1778 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1779 vm_object_t object; 1780 object = vm_object_allocate(OBJT_DEFAULT, 1781 atop(entry->end - entry->start)); 1782 entry->object.vm_object = object; 1783 entry->offset = 0; 1784 if (entry->cred != NULL) { 1785 object->cred = entry->cred; 1786 object->charge = entry->end - entry->start; 1787 entry->cred = NULL; 1788 } 1789 } else if (entry->object.vm_object != NULL && 1790 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1791 entry->cred != NULL) { 1792 VM_OBJECT_WLOCK(entry->object.vm_object); 1793 KASSERT(entry->object.vm_object->cred == NULL, 1794 ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); 1795 entry->object.vm_object->cred = entry->cred; 1796 entry->object.vm_object->charge = entry->end - entry->start; 1797 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1798 entry->cred = NULL; 1799 } 1800 1801 new_entry = vm_map_entry_create(map); 1802 *new_entry = *entry; 1803 1804 new_entry->end = start; 1805 entry->offset += (start - entry->start); 1806 entry->start = start; 1807 if (new_entry->cred != NULL) 1808 crhold(entry->cred); 1809 1810 vm_map_entry_link(map, entry->prev, new_entry); 1811 1812 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1813 vm_object_reference(new_entry->object.vm_object); 1814 /* 1815 * The object->un_pager.vnp.writemappings for the 1816 * object of MAP_ENTRY_VN_WRITECNT type entry shall be 1817 * kept as is here. The virtual pages are 1818 * re-distributed among the clipped entries, so the sum is 1819 * left the same. 1820 */ 1821 } 1822 } 1823 1824 /* 1825 * vm_map_clip_end: [ internal use only ] 1826 * 1827 * Asserts that the given entry ends at or before 1828 * the specified address; if necessary, 1829 * it splits the entry into two. 1830 */ 1831 #define vm_map_clip_end(map, entry, endaddr) \ 1832 { \ 1833 if ((endaddr) < (entry->end)) \ 1834 _vm_map_clip_end((map), (entry), (endaddr)); \ 1835 } 1836 1837 /* 1838 * This routine is called only when it is known that 1839 * the entry must be split. 1840 */ 1841 static void 1842 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1843 { 1844 vm_map_entry_t new_entry; 1845 1846 VM_MAP_ASSERT_LOCKED(map); 1847 KASSERT(entry->start < end && entry->end > end, 1848 ("_vm_map_clip_end: invalid clip of entry %p", entry)); 1849 1850 /* 1851 * If there is no object backing this entry, we might as well create 1852 * one now. If we defer it, an object can get created after the map 1853 * is clipped, and individual objects will be created for the split-up 1854 * map. 
This is a bit of a hack, but is also about the best place to 1855 * put this improvement. 1856 */ 1857 if (entry->object.vm_object == NULL && !map->system_map && 1858 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1859 vm_object_t object; 1860 object = vm_object_allocate(OBJT_DEFAULT, 1861 atop(entry->end - entry->start)); 1862 entry->object.vm_object = object; 1863 entry->offset = 0; 1864 if (entry->cred != NULL) { 1865 object->cred = entry->cred; 1866 object->charge = entry->end - entry->start; 1867 entry->cred = NULL; 1868 } 1869 } else if (entry->object.vm_object != NULL && 1870 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1871 entry->cred != NULL) { 1872 VM_OBJECT_WLOCK(entry->object.vm_object); 1873 KASSERT(entry->object.vm_object->cred == NULL, 1874 ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); 1875 entry->object.vm_object->cred = entry->cred; 1876 entry->object.vm_object->charge = entry->end - entry->start; 1877 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1878 entry->cred = NULL; 1879 } 1880 1881 /* 1882 * Create a new entry and insert it AFTER the specified entry 1883 */ 1884 new_entry = vm_map_entry_create(map); 1885 *new_entry = *entry; 1886 1887 new_entry->start = entry->end = end; 1888 new_entry->offset += (end - entry->start); 1889 if (new_entry->cred != NULL) 1890 crhold(entry->cred); 1891 1892 vm_map_entry_link(map, entry, new_entry); 1893 1894 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1895 vm_object_reference(new_entry->object.vm_object); 1896 } 1897 } 1898 1899 /* 1900 * vm_map_submap: [ kernel use only ] 1901 * 1902 * Mark the given range as handled by a subordinate map. 1903 * 1904 * This range must have been created with vm_map_find, 1905 * and no other operations may have been performed on this 1906 * range prior to calling vm_map_submap. 1907 * 1908 * Only a limited number of operations can be performed 1909 * within this range after calling vm_map_submap: 1910 * vm_fault 1911 * [Don't try vm_map_copy!] 1912 * 1913 * To remove a submapping, one must first remove the 1914 * range from the superior map, and then destroy the 1915 * submap (if desired). [Better yet, don't try it.] 1916 */ 1917 int 1918 vm_map_submap( 1919 vm_map_t map, 1920 vm_offset_t start, 1921 vm_offset_t end, 1922 vm_map_t submap) 1923 { 1924 vm_map_entry_t entry; 1925 int result = KERN_INVALID_ARGUMENT; 1926 1927 vm_map_lock(map); 1928 1929 VM_MAP_RANGE_CHECK(map, start, end); 1930 1931 if (vm_map_lookup_entry(map, start, &entry)) { 1932 vm_map_clip_start(map, entry, start); 1933 } else 1934 entry = entry->next; 1935 1936 vm_map_clip_end(map, entry, end); 1937 1938 if ((entry->start == start) && (entry->end == end) && 1939 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1940 (entry->object.vm_object == NULL)) { 1941 entry->object.sub_map = submap; 1942 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1943 result = KERN_SUCCESS; 1944 } 1945 vm_map_unlock(map); 1946 1947 return (result); 1948 } 1949 1950 /* 1951 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 1952 */ 1953 #define MAX_INIT_PT 96 1954 1955 /* 1956 * vm_map_pmap_enter: 1957 * 1958 * Preload the specified map's pmap with mappings to the specified 1959 * object's memory-resident pages. No further physical pages are 1960 * allocated, and no further virtual pages are retrieved from secondary 1961 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 1962 * limited number of page mappings are created at the low-end of the 1963 * specified address range.
(For this purpose, a superpage mapping 1964 * counts as one page mapping.) Otherwise, all resident pages within 1965 * the specified address range are mapped. 1966 */ 1967 static void 1968 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1969 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1970 { 1971 vm_offset_t start; 1972 vm_page_t p, p_start; 1973 vm_pindex_t mask, psize, threshold, tmpidx; 1974 1975 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1976 return; 1977 VM_OBJECT_RLOCK(object); 1978 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1979 VM_OBJECT_RUNLOCK(object); 1980 VM_OBJECT_WLOCK(object); 1981 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1982 pmap_object_init_pt(map->pmap, addr, object, pindex, 1983 size); 1984 VM_OBJECT_WUNLOCK(object); 1985 return; 1986 } 1987 VM_OBJECT_LOCK_DOWNGRADE(object); 1988 } 1989 1990 psize = atop(size); 1991 if (psize + pindex > object->size) { 1992 if (object->size < pindex) { 1993 VM_OBJECT_RUNLOCK(object); 1994 return; 1995 } 1996 psize = object->size - pindex; 1997 } 1998 1999 start = 0; 2000 p_start = NULL; 2001 threshold = MAX_INIT_PT; 2002 2003 p = vm_page_find_least(object, pindex); 2004 /* 2005 * Assert: the variable p is either (1) the page with the 2006 * least pindex greater than or equal to the parameter pindex 2007 * or (2) NULL. 2008 */ 2009 for (; 2010 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2011 p = TAILQ_NEXT(p, listq)) { 2012 /* 2013 * don't allow an madvise to blow away our really 2014 * free pages allocating pv entries. 2015 */ 2016 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2017 vm_page_count_severe()) || 2018 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2019 tmpidx >= threshold)) { 2020 psize = tmpidx; 2021 break; 2022 } 2023 if (p->valid == VM_PAGE_BITS_ALL) { 2024 if (p_start == NULL) { 2025 start = addr + ptoa(tmpidx); 2026 p_start = p; 2027 } 2028 /* Jump ahead if a superpage mapping is possible. */ 2029 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2030 (pagesizes[p->psind] - 1)) == 0) { 2031 mask = atop(pagesizes[p->psind]) - 1; 2032 if (tmpidx + mask < psize && 2033 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2034 p += mask; 2035 threshold += mask; 2036 } 2037 } 2038 } else if (p_start != NULL) { 2039 pmap_enter_object(map->pmap, start, addr + 2040 ptoa(tmpidx), p_start, prot); 2041 p_start = NULL; 2042 } 2043 } 2044 if (p_start != NULL) 2045 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2046 p_start, prot); 2047 VM_OBJECT_RUNLOCK(object); 2048 } 2049 2050 /* 2051 * vm_map_protect: 2052 * 2053 * Sets the protection of the specified address 2054 * region in the target map. If "set_max" is 2055 * specified, the maximum protection is to be set; 2056 * otherwise, only the current protection is affected. 2057 */ 2058 int 2059 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2060 vm_prot_t new_prot, boolean_t set_max) 2061 { 2062 vm_map_entry_t current, entry; 2063 vm_object_t obj; 2064 struct ucred *cred; 2065 vm_prot_t old_prot; 2066 2067 if (start == end) 2068 return (KERN_SUCCESS); 2069 2070 vm_map_lock(map); 2071 2072 /* 2073 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2074 * need to fault pages into the map and will drop the map lock while 2075 * doing so, and the VM object may end up in an inconsistent state if we 2076 * update the protection on the map entry in between faults. 
2077 */ 2078 vm_map_wait_busy(map); 2079 2080 VM_MAP_RANGE_CHECK(map, start, end); 2081 2082 if (vm_map_lookup_entry(map, start, &entry)) { 2083 vm_map_clip_start(map, entry, start); 2084 } else { 2085 entry = entry->next; 2086 } 2087 2088 /* 2089 * Make a first pass to check for protection violations. 2090 */ 2091 for (current = entry; current->start < end; current = current->next) { 2092 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2093 continue; 2094 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2095 vm_map_unlock(map); 2096 return (KERN_INVALID_ARGUMENT); 2097 } 2098 if ((new_prot & current->max_protection) != new_prot) { 2099 vm_map_unlock(map); 2100 return (KERN_PROTECTION_FAILURE); 2101 } 2102 } 2103 2104 /* 2105 * Do an accounting pass for private read-only mappings that 2106 * now will do cow due to allowed write (e.g. debugger sets 2107 * breakpoint on text segment) 2108 */ 2109 for (current = entry; current->start < end; current = current->next) { 2110 2111 vm_map_clip_end(map, current, end); 2112 2113 if (set_max || 2114 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2115 ENTRY_CHARGED(current) || 2116 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2117 continue; 2118 } 2119 2120 cred = curthread->td_ucred; 2121 obj = current->object.vm_object; 2122 2123 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2124 if (!swap_reserve(current->end - current->start)) { 2125 vm_map_unlock(map); 2126 return (KERN_RESOURCE_SHORTAGE); 2127 } 2128 crhold(cred); 2129 current->cred = cred; 2130 continue; 2131 } 2132 2133 VM_OBJECT_WLOCK(obj); 2134 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2135 VM_OBJECT_WUNLOCK(obj); 2136 continue; 2137 } 2138 2139 /* 2140 * Charge for the whole object allocation now, since 2141 * we cannot distinguish between non-charged and 2142 * charged clipped mapping of the same object later. 2143 */ 2144 KASSERT(obj->charge == 0, 2145 ("vm_map_protect: object %p overcharged (entry %p)", 2146 obj, current)); 2147 if (!swap_reserve(ptoa(obj->size))) { 2148 VM_OBJECT_WUNLOCK(obj); 2149 vm_map_unlock(map); 2150 return (KERN_RESOURCE_SHORTAGE); 2151 } 2152 2153 crhold(cred); 2154 obj->cred = cred; 2155 obj->charge = ptoa(obj->size); 2156 VM_OBJECT_WUNLOCK(obj); 2157 } 2158 2159 /* 2160 * Go back and fix up protections. [Note that clipping is not 2161 * necessary the second time.] 2162 */ 2163 for (current = entry; current->start < end; current = current->next) { 2164 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2165 continue; 2166 2167 old_prot = current->protection; 2168 2169 if (set_max) 2170 current->protection = 2171 (current->max_protection = new_prot) & 2172 old_prot; 2173 else 2174 current->protection = new_prot; 2175 2176 /* 2177 * For user wired map entries, the normal lazy evaluation of 2178 * write access upgrades through soft page faults is 2179 * undesirable. Instead, immediately copy any pages that are 2180 * copy-on-write and enable write access in the physical map. 2181 */ 2182 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2183 (current->protection & VM_PROT_WRITE) != 0 && 2184 (old_prot & VM_PROT_WRITE) == 0) 2185 vm_fault_copy_entry(map, map, current, current, NULL); 2186 2187 /* 2188 * When restricting access, update the physical map. Worry 2189 * about copy-on-write here. 2190 */ 2191 if ((old_prot & ~current->protection) != 0) { 2192 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 2193 VM_PROT_ALL) 2194 pmap_protect(map->pmap, current->start, 2195 current->end, 2196 current->protection & MASK(current)); 2197 #undef MASK 2198 } 2199 vm_map_simplify_entry(map, current); 2200 } 2201 vm_map_unlock(map); 2202 return (KERN_SUCCESS); 2203 } 2204 2205 /* 2206 * vm_map_madvise: 2207 * 2208 * This routine traverses a process's map handling the madvise 2209 * system call. Advisories are classified as either those affecting 2210 * the vm_map_entry structure, or those affecting the underlying 2211 * objects. 2212 */ 2213 int 2214 vm_map_madvise( 2215 vm_map_t map, 2216 vm_offset_t start, 2217 vm_offset_t end, 2218 int behav) 2219 { 2220 vm_map_entry_t current, entry; 2221 bool modify_map; 2222 2223 /* 2224 * Some madvise calls directly modify the vm_map_entry, in which case 2225 * we need to use an exclusive lock on the map and we need to perform 2226 * various clipping operations. Otherwise we only need a read-lock 2227 * on the map. 2228 */ 2229 switch(behav) { 2230 case MADV_NORMAL: 2231 case MADV_SEQUENTIAL: 2232 case MADV_RANDOM: 2233 case MADV_NOSYNC: 2234 case MADV_AUTOSYNC: 2235 case MADV_NOCORE: 2236 case MADV_CORE: 2237 if (start == end) 2238 return (0); 2239 modify_map = true; 2240 vm_map_lock(map); 2241 break; 2242 case MADV_WILLNEED: 2243 case MADV_DONTNEED: 2244 case MADV_FREE: 2245 if (start == end) 2246 return (0); 2247 modify_map = false; 2248 vm_map_lock_read(map); 2249 break; 2250 default: 2251 return (EINVAL); 2252 } 2253 2254 /* 2255 * Locate starting entry and clip if necessary. 2256 */ 2257 VM_MAP_RANGE_CHECK(map, start, end); 2258 2259 if (vm_map_lookup_entry(map, start, &entry)) { 2260 if (modify_map) 2261 vm_map_clip_start(map, entry, start); 2262 } else { 2263 entry = entry->next; 2264 } 2265 2266 if (modify_map) { 2267 /* 2268 * madvise behaviors that are implemented in the vm_map_entry. 2269 * 2270 * We clip the vm_map_entry so that behavioral changes are 2271 * limited to the specified address range. 2272 */ 2273 for (current = entry; current->start < end; 2274 current = current->next) { 2275 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2276 continue; 2277 2278 vm_map_clip_end(map, current, end); 2279 2280 switch (behav) { 2281 case MADV_NORMAL: 2282 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2283 break; 2284 case MADV_SEQUENTIAL: 2285 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2286 break; 2287 case MADV_RANDOM: 2288 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2289 break; 2290 case MADV_NOSYNC: 2291 current->eflags |= MAP_ENTRY_NOSYNC; 2292 break; 2293 case MADV_AUTOSYNC: 2294 current->eflags &= ~MAP_ENTRY_NOSYNC; 2295 break; 2296 case MADV_NOCORE: 2297 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2298 break; 2299 case MADV_CORE: 2300 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2301 break; 2302 default: 2303 break; 2304 } 2305 vm_map_simplify_entry(map, current); 2306 } 2307 vm_map_unlock(map); 2308 } else { 2309 vm_pindex_t pstart, pend; 2310 2311 /* 2312 * madvise behaviors that are implemented in the underlying 2313 * vm_object. 2314 * 2315 * Since we don't clip the vm_map_entry, we have to clip 2316 * the vm_object pindex and count.
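 *
 * As an illustrative sketch (hypothetical addresses, assuming 4KB
 * pages): for an entry mapping [0x10000, 0x20000) at object offset 0,
 * a request covering [0x14000, 0x18000) yields pstart = 4, pend = 8,
 * useStart = 0x14000, and useEnd = 0x18000, so only the middle four
 * pages of the backing object are advised.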
2317 */ 2318 for (current = entry; current->start < end; 2319 current = current->next) { 2320 vm_offset_t useEnd, useStart; 2321 2322 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2323 continue; 2324 2325 pstart = OFF_TO_IDX(current->offset); 2326 pend = pstart + atop(current->end - current->start); 2327 useStart = current->start; 2328 useEnd = current->end; 2329 2330 if (current->start < start) { 2331 pstart += atop(start - current->start); 2332 useStart = start; 2333 } 2334 if (current->end > end) { 2335 pend -= atop(current->end - end); 2336 useEnd = end; 2337 } 2338 2339 if (pstart >= pend) 2340 continue; 2341 2342 /* 2343 * Perform the pmap_advise() before clearing 2344 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2345 * concurrent pmap operation, such as pmap_remove(), 2346 * could clear a reference in the pmap and set 2347 * PGA_REFERENCED on the page before the pmap_advise() 2348 * had completed. Consequently, the page would appear 2349 * referenced based upon an old reference that 2350 * occurred before this pmap_advise() ran. 2351 */ 2352 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2353 pmap_advise(map->pmap, useStart, useEnd, 2354 behav); 2355 2356 vm_object_madvise(current->object.vm_object, pstart, 2357 pend, behav); 2358 2359 /* 2360 * Pre-populate paging structures in the 2361 * WILLNEED case. For wired entries, the 2362 * paging structures are already populated. 2363 */ 2364 if (behav == MADV_WILLNEED && 2365 current->wired_count == 0) { 2366 vm_map_pmap_enter(map, 2367 useStart, 2368 current->protection, 2369 current->object.vm_object, 2370 pstart, 2371 ptoa(pend - pstart), 2372 MAP_PREFAULT_MADVISE 2373 ); 2374 } 2375 } 2376 vm_map_unlock_read(map); 2377 } 2378 return (0); 2379 } 2380 2381 2382 /* 2383 * vm_map_inherit: 2384 * 2385 * Sets the inheritance of the specified address 2386 * range in the target map. Inheritance 2387 * affects how the map will be shared with 2388 * child maps at the time of vmspace_fork. 2389 */ 2390 int 2391 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2392 vm_inherit_t new_inheritance) 2393 { 2394 vm_map_entry_t entry; 2395 vm_map_entry_t temp_entry; 2396 2397 switch (new_inheritance) { 2398 case VM_INHERIT_NONE: 2399 case VM_INHERIT_COPY: 2400 case VM_INHERIT_SHARE: 2401 case VM_INHERIT_ZERO: 2402 break; 2403 default: 2404 return (KERN_INVALID_ARGUMENT); 2405 } 2406 if (start == end) 2407 return (KERN_SUCCESS); 2408 vm_map_lock(map); 2409 VM_MAP_RANGE_CHECK(map, start, end); 2410 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2411 entry = temp_entry; 2412 vm_map_clip_start(map, entry, start); 2413 } else 2414 entry = temp_entry->next; 2415 while (entry->start < end) { 2416 vm_map_clip_end(map, entry, end); 2417 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2418 new_inheritance != VM_INHERIT_ZERO) 2419 entry->inheritance = new_inheritance; 2420 vm_map_simplify_entry(map, entry); 2421 entry = entry->next; 2422 } 2423 vm_map_unlock(map); 2424 return (KERN_SUCCESS); 2425 } 2426 2427 /* 2428 * vm_map_unwire: 2429 * 2430 * Implements both kernel and user unwiring. 2431 */ 2432 int 2433 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2434 int flags) 2435 { 2436 vm_map_entry_t entry, first_entry, tmp_entry; 2437 vm_offset_t saved_start; 2438 unsigned int last_timestamp; 2439 int rv; 2440 boolean_t need_wakeup, result, user_unwire; 2441 2442 if (start == end) 2443 return (KERN_SUCCESS); 2444 user_unwire = (flags & VM_MAP_WIRE_USER) ? 
TRUE : FALSE; 2445 vm_map_lock(map); 2446 VM_MAP_RANGE_CHECK(map, start, end); 2447 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2448 if (flags & VM_MAP_WIRE_HOLESOK) 2449 first_entry = first_entry->next; 2450 else { 2451 vm_map_unlock(map); 2452 return (KERN_INVALID_ADDRESS); 2453 } 2454 } 2455 last_timestamp = map->timestamp; 2456 entry = first_entry; 2457 while (entry->start < end) { 2458 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2459 /* 2460 * We have not yet clipped the entry. 2461 */ 2462 saved_start = (start >= entry->start) ? start : 2463 entry->start; 2464 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2465 if (vm_map_unlock_and_wait(map, 0)) { 2466 /* 2467 * Allow interruption of user unwiring? 2468 */ 2469 } 2470 vm_map_lock(map); 2471 if (last_timestamp+1 != map->timestamp) { 2472 /* 2473 * Look again for the entry because the map was 2474 * modified while it was unlocked. 2475 * Specifically, the entry may have been 2476 * clipped, merged, or deleted. 2477 */ 2478 if (!vm_map_lookup_entry(map, saved_start, 2479 &tmp_entry)) { 2480 if (flags & VM_MAP_WIRE_HOLESOK) 2481 tmp_entry = tmp_entry->next; 2482 else { 2483 if (saved_start == start) { 2484 /* 2485 * First_entry has been deleted. 2486 */ 2487 vm_map_unlock(map); 2488 return (KERN_INVALID_ADDRESS); 2489 } 2490 end = saved_start; 2491 rv = KERN_INVALID_ADDRESS; 2492 goto done; 2493 } 2494 } 2495 if (entry == first_entry) 2496 first_entry = tmp_entry; 2497 else 2498 first_entry = NULL; 2499 entry = tmp_entry; 2500 } 2501 last_timestamp = map->timestamp; 2502 continue; 2503 } 2504 vm_map_clip_start(map, entry, start); 2505 vm_map_clip_end(map, entry, end); 2506 /* 2507 * Mark the entry in case the map lock is released. (See 2508 * above.) 2509 */ 2510 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2511 entry->wiring_thread == NULL, 2512 ("owned map entry %p", entry)); 2513 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2514 entry->wiring_thread = curthread; 2515 /* 2516 * Check the map for holes in the specified region. 2517 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2518 */ 2519 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2520 (entry->end < end && entry->next->start > entry->end)) { 2521 end = entry->end; 2522 rv = KERN_INVALID_ADDRESS; 2523 goto done; 2524 } 2525 /* 2526 * If system unwiring, require that the entry is system wired. 2527 */ 2528 if (!user_unwire && 2529 vm_map_entry_system_wired_count(entry) == 0) { 2530 end = entry->end; 2531 rv = KERN_INVALID_ARGUMENT; 2532 goto done; 2533 } 2534 entry = entry->next; 2535 } 2536 rv = KERN_SUCCESS; 2537 done: 2538 need_wakeup = FALSE; 2539 if (first_entry == NULL) { 2540 result = vm_map_lookup_entry(map, start, &first_entry); 2541 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2542 first_entry = first_entry->next; 2543 else 2544 KASSERT(result, ("vm_map_unwire: lookup failed")); 2545 } 2546 for (entry = first_entry; entry->start < end; entry = entry->next) { 2547 /* 2548 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2549 * space in the unwired region could have been mapped 2550 * while the map lock was dropped for draining 2551 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2552 * could be simultaneously wiring this new mapping 2553 * entry. Detect these cases and skip any entries 2554 * marked as in transition by us. 
2555 */ 2556 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2557 entry->wiring_thread != curthread) { 2558 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2559 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2560 continue; 2561 } 2562 2563 if (rv == KERN_SUCCESS && (!user_unwire || 2564 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2565 if (user_unwire) 2566 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2567 if (entry->wired_count == 1) 2568 vm_map_entry_unwire(map, entry); 2569 else 2570 entry->wired_count--; 2571 } 2572 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2573 ("vm_map_unwire: in-transition flag missing %p", entry)); 2574 KASSERT(entry->wiring_thread == curthread, 2575 ("vm_map_unwire: alien wire %p", entry)); 2576 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2577 entry->wiring_thread = NULL; 2578 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2579 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2580 need_wakeup = TRUE; 2581 } 2582 vm_map_simplify_entry(map, entry); 2583 } 2584 vm_map_unlock(map); 2585 if (need_wakeup) 2586 vm_map_wakeup(map); 2587 return (rv); 2588 } 2589 2590 /* 2591 * vm_map_wire_entry_failure: 2592 * 2593 * Handle a wiring failure on the given entry. 2594 * 2595 * The map should be locked. 2596 */ 2597 static void 2598 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2599 vm_offset_t failed_addr) 2600 { 2601 2602 VM_MAP_ASSERT_LOCKED(map); 2603 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2604 entry->wired_count == 1, 2605 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2606 KASSERT(failed_addr < entry->end, 2607 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2608 2609 /* 2610 * If any pages at the start of this entry were successfully wired, 2611 * then unwire them. 2612 */ 2613 if (failed_addr > entry->start) { 2614 pmap_unwire(map->pmap, entry->start, failed_addr); 2615 vm_object_unwire(entry->object.vm_object, entry->offset, 2616 failed_addr - entry->start, PQ_ACTIVE); 2617 } 2618 2619 /* 2620 * Assign an out-of-range value to represent the failure to wire this 2621 * entry. 2622 */ 2623 entry->wired_count = -1; 2624 } 2625 2626 /* 2627 * vm_map_wire: 2628 * 2629 * Implements both kernel and user wiring. 2630 */ 2631 int 2632 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2633 int flags) 2634 { 2635 vm_map_entry_t entry, first_entry, tmp_entry; 2636 vm_offset_t faddr, saved_end, saved_start; 2637 unsigned int last_timestamp; 2638 int rv; 2639 boolean_t need_wakeup, result, user_wire; 2640 vm_prot_t prot; 2641 2642 if (start == end) 2643 return (KERN_SUCCESS); 2644 prot = 0; 2645 if (flags & VM_MAP_WIRE_WRITE) 2646 prot |= VM_PROT_WRITE; 2647 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2648 vm_map_lock(map); 2649 VM_MAP_RANGE_CHECK(map, start, end); 2650 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2651 if (flags & VM_MAP_WIRE_HOLESOK) 2652 first_entry = first_entry->next; 2653 else { 2654 vm_map_unlock(map); 2655 return (KERN_INVALID_ADDRESS); 2656 } 2657 } 2658 last_timestamp = map->timestamp; 2659 entry = first_entry; 2660 while (entry->start < end) { 2661 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2662 /* 2663 * We have not yet clipped the entry. 2664 */ 2665 saved_start = (start >= entry->start) ? start : 2666 entry->start; 2667 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2668 if (vm_map_unlock_and_wait(map, 0)) { 2669 /* 2670 * Allow interruption of user wiring? 
2671 */ 2672 } 2673 vm_map_lock(map); 2674 if (last_timestamp + 1 != map->timestamp) { 2675 /* 2676 * Look again for the entry because the map was 2677 * modified while it was unlocked. 2678 * Specifically, the entry may have been 2679 * clipped, merged, or deleted. 2680 */ 2681 if (!vm_map_lookup_entry(map, saved_start, 2682 &tmp_entry)) { 2683 if (flags & VM_MAP_WIRE_HOLESOK) 2684 tmp_entry = tmp_entry->next; 2685 else { 2686 if (saved_start == start) { 2687 /* 2688 * first_entry has been deleted. 2689 */ 2690 vm_map_unlock(map); 2691 return (KERN_INVALID_ADDRESS); 2692 } 2693 end = saved_start; 2694 rv = KERN_INVALID_ADDRESS; 2695 goto done; 2696 } 2697 } 2698 if (entry == first_entry) 2699 first_entry = tmp_entry; 2700 else 2701 first_entry = NULL; 2702 entry = tmp_entry; 2703 } 2704 last_timestamp = map->timestamp; 2705 continue; 2706 } 2707 vm_map_clip_start(map, entry, start); 2708 vm_map_clip_end(map, entry, end); 2709 /* 2710 * Mark the entry in case the map lock is released. (See 2711 * above.) 2712 */ 2713 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2714 entry->wiring_thread == NULL, 2715 ("owned map entry %p", entry)); 2716 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2717 entry->wiring_thread = curthread; 2718 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2719 || (entry->protection & prot) != prot) { 2720 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2721 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2722 end = entry->end; 2723 rv = KERN_INVALID_ADDRESS; 2724 goto done; 2725 } 2726 goto next_entry; 2727 } 2728 if (entry->wired_count == 0) { 2729 entry->wired_count++; 2730 saved_start = entry->start; 2731 saved_end = entry->end; 2732 2733 /* 2734 * Release the map lock, relying on the in-transition 2735 * mark. Mark the map busy for fork. 2736 */ 2737 vm_map_busy(map); 2738 vm_map_unlock(map); 2739 2740 faddr = saved_start; 2741 do { 2742 /* 2743 * Simulate a fault to get the page and enter 2744 * it into the physical map. 2745 */ 2746 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 2747 VM_FAULT_WIRE)) != KERN_SUCCESS) 2748 break; 2749 } while ((faddr += PAGE_SIZE) < saved_end); 2750 vm_map_lock(map); 2751 vm_map_unbusy(map); 2752 if (last_timestamp + 1 != map->timestamp) { 2753 /* 2754 * Look again for the entry because the map was 2755 * modified while it was unlocked. The entry 2756 * may have been clipped, but NOT merged or 2757 * deleted. 2758 */ 2759 result = vm_map_lookup_entry(map, saved_start, 2760 &tmp_entry); 2761 KASSERT(result, ("vm_map_wire: lookup failed")); 2762 if (entry == first_entry) 2763 first_entry = tmp_entry; 2764 else 2765 first_entry = NULL; 2766 entry = tmp_entry; 2767 while (entry->end < saved_end) { 2768 /* 2769 * In case of failure, handle entries 2770 * that were not fully wired here; 2771 * fully wired entries are handled 2772 * later. 2773 */ 2774 if (rv != KERN_SUCCESS && 2775 faddr < entry->end) 2776 vm_map_wire_entry_failure(map, 2777 entry, faddr); 2778 entry = entry->next; 2779 } 2780 } 2781 last_timestamp = map->timestamp; 2782 if (rv != KERN_SUCCESS) { 2783 vm_map_wire_entry_failure(map, entry, faddr); 2784 end = entry->end; 2785 goto done; 2786 } 2787 } else if (!user_wire || 2788 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2789 entry->wired_count++; 2790 } 2791 /* 2792 * Check the map for holes in the specified region. 2793 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
2794 */ 2795 next_entry: 2796 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 2797 entry->end < end && entry->next->start > entry->end) { 2798 end = entry->end; 2799 rv = KERN_INVALID_ADDRESS; 2800 goto done; 2801 } 2802 entry = entry->next; 2803 } 2804 rv = KERN_SUCCESS; 2805 done: 2806 need_wakeup = FALSE; 2807 if (first_entry == NULL) { 2808 result = vm_map_lookup_entry(map, start, &first_entry); 2809 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2810 first_entry = first_entry->next; 2811 else 2812 KASSERT(result, ("vm_map_wire: lookup failed")); 2813 } 2814 for (entry = first_entry; entry->start < end; entry = entry->next) { 2815 /* 2816 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2817 * space in the unwired region could have been mapped 2818 * while the map lock was dropped for faulting in the 2819 * pages or draining MAP_ENTRY_IN_TRANSITION. 2820 * Moreover, another thread could be simultaneously 2821 * wiring this new mapping entry. Detect these cases 2822 * and skip any entries marked as in transition not by us. 2823 */ 2824 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2825 entry->wiring_thread != curthread) { 2826 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2827 ("vm_map_wire: !HOLESOK and new/changed entry")); 2828 continue; 2829 } 2830 2831 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2832 goto next_entry_done; 2833 2834 if (rv == KERN_SUCCESS) { 2835 if (user_wire) 2836 entry->eflags |= MAP_ENTRY_USER_WIRED; 2837 } else if (entry->wired_count == -1) { 2838 /* 2839 * Wiring failed on this entry. Thus, unwiring is 2840 * unnecessary. 2841 */ 2842 entry->wired_count = 0; 2843 } else if (!user_wire || 2844 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2845 /* 2846 * Undo the wiring. Wiring succeeded on this entry 2847 * but failed on a later entry. 2848 */ 2849 if (entry->wired_count == 1) 2850 vm_map_entry_unwire(map, entry); 2851 else 2852 entry->wired_count--; 2853 } 2854 next_entry_done: 2855 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2856 ("vm_map_wire: in-transition flag missing %p", entry)); 2857 KASSERT(entry->wiring_thread == curthread, 2858 ("vm_map_wire: alien wire %p", entry)); 2859 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2860 MAP_ENTRY_WIRE_SKIPPED); 2861 entry->wiring_thread = NULL; 2862 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2863 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2864 need_wakeup = TRUE; 2865 } 2866 vm_map_simplify_entry(map, entry); 2867 } 2868 vm_map_unlock(map); 2869 if (need_wakeup) 2870 vm_map_wakeup(map); 2871 return (rv); 2872 } 2873 2874 /* 2875 * vm_map_sync 2876 * 2877 * Push any dirty cached pages in the address range to their pager. 2878 * If syncio is TRUE, dirty pages are written synchronously. 2879 * If invalidate is TRUE, any cached pages are freed as well. 2880 * 2881 * If the size of the region from start to end is zero, we are 2882 * supposed to flush all modified pages within the region containing 2883 * start. Unfortunately, a region can be split or coalesced with 2884 * neighboring regions, making it difficult to determine what the 2885 * original region was. Therefore, we approximate this requirement by 2886 * flushing the current region containing start. 2887 * 2888 * Returns an error if any part of the specified range is not mapped. 
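 *
 * A minimal illustrative call (a sketch only, not taken from any
 * particular caller) that synchronously flushes and invalidates a
 * page-aligned range:
 *
 *	(void)vm_map_sync(map, trunc_page(addr), round_page(addr + len),
 *	    TRUE, TRUE);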
2889 */ 2890 int 2891 vm_map_sync( 2892 vm_map_t map, 2893 vm_offset_t start, 2894 vm_offset_t end, 2895 boolean_t syncio, 2896 boolean_t invalidate) 2897 { 2898 vm_map_entry_t current; 2899 vm_map_entry_t entry; 2900 vm_size_t size; 2901 vm_object_t object; 2902 vm_ooffset_t offset; 2903 unsigned int last_timestamp; 2904 boolean_t failed; 2905 2906 vm_map_lock_read(map); 2907 VM_MAP_RANGE_CHECK(map, start, end); 2908 if (!vm_map_lookup_entry(map, start, &entry)) { 2909 vm_map_unlock_read(map); 2910 return (KERN_INVALID_ADDRESS); 2911 } else if (start == end) { 2912 start = entry->start; 2913 end = entry->end; 2914 } 2915 /* 2916 * Make a first pass to check for user-wired memory and holes. 2917 */ 2918 for (current = entry; current->start < end; current = current->next) { 2919 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2920 vm_map_unlock_read(map); 2921 return (KERN_INVALID_ARGUMENT); 2922 } 2923 if (end > current->end && 2924 current->end != current->next->start) { 2925 vm_map_unlock_read(map); 2926 return (KERN_INVALID_ADDRESS); 2927 } 2928 } 2929 2930 if (invalidate) 2931 pmap_remove(map->pmap, start, end); 2932 failed = FALSE; 2933 2934 /* 2935 * Make a second pass, cleaning/uncaching pages from the indicated 2936 * objects as we go. 2937 */ 2938 for (current = entry; current->start < end;) { 2939 offset = current->offset + (start - current->start); 2940 size = (end <= current->end ? end : current->end) - start; 2941 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2942 vm_map_t smap; 2943 vm_map_entry_t tentry; 2944 vm_size_t tsize; 2945 2946 smap = current->object.sub_map; 2947 vm_map_lock_read(smap); 2948 (void) vm_map_lookup_entry(smap, offset, &tentry); 2949 tsize = tentry->end - offset; 2950 if (tsize < size) 2951 size = tsize; 2952 object = tentry->object.vm_object; 2953 offset = tentry->offset + (offset - tentry->start); 2954 vm_map_unlock_read(smap); 2955 } else { 2956 object = current->object.vm_object; 2957 } 2958 vm_object_reference(object); 2959 last_timestamp = map->timestamp; 2960 vm_map_unlock_read(map); 2961 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2962 failed = TRUE; 2963 start += size; 2964 vm_object_deallocate(object); 2965 vm_map_lock_read(map); 2966 if (last_timestamp == map->timestamp || 2967 !vm_map_lookup_entry(map, start, &current)) 2968 current = current->next; 2969 } 2970 2971 vm_map_unlock_read(map); 2972 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2973 } 2974 2975 /* 2976 * vm_map_entry_unwire: [ internal use only ] 2977 * 2978 * Make the region specified by this entry pageable. 2979 * 2980 * The map in question should be locked. 2981 * [This is the reason for this routine's existence.] 2982 */ 2983 static void 2984 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2985 { 2986 2987 VM_MAP_ASSERT_LOCKED(map); 2988 KASSERT(entry->wired_count > 0, 2989 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 2990 pmap_unwire(map->pmap, entry->start, entry->end); 2991 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 2992 entry->start, PQ_ACTIVE); 2993 entry->wired_count = 0; 2994 } 2995 2996 static void 2997 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2998 { 2999 3000 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3001 vm_object_deallocate(entry->object.vm_object); 3002 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3003 } 3004 3005 /* 3006 * vm_map_entry_delete: [ internal use only ] 3007 * 3008 * Deallocate the given entry from the target map.
3009 */ 3010 static void 3011 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3012 { 3013 vm_object_t object; 3014 vm_pindex_t offidxstart, offidxend, count, size1; 3015 vm_size_t size; 3016 3017 vm_map_entry_unlink(map, entry); 3018 object = entry->object.vm_object; 3019 3020 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3021 MPASS(entry->cred == NULL); 3022 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3023 MPASS(object == NULL); 3024 vm_map_entry_deallocate(entry, map->system_map); 3025 return; 3026 } 3027 3028 size = entry->end - entry->start; 3029 map->size -= size; 3030 3031 if (entry->cred != NULL) { 3032 swap_release_by_cred(size, entry->cred); 3033 crfree(entry->cred); 3034 } 3035 3036 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 3037 (object != NULL)) { 3038 KASSERT(entry->cred == NULL || object->cred == NULL || 3039 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3040 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3041 count = atop(size); 3042 offidxstart = OFF_TO_IDX(entry->offset); 3043 offidxend = offidxstart + count; 3044 VM_OBJECT_WLOCK(object); 3045 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 3046 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 3047 object == kernel_object)) { 3048 vm_object_collapse(object); 3049 3050 /* 3051 * The option OBJPR_NOTMAPPED can be passed here 3052 * because vm_map_delete() already performed 3053 * pmap_remove() on the only mapping to this range 3054 * of pages. 3055 */ 3056 vm_object_page_remove(object, offidxstart, offidxend, 3057 OBJPR_NOTMAPPED); 3058 if (object->type == OBJT_SWAP) 3059 swap_pager_freespace(object, offidxstart, 3060 count); 3061 if (offidxend >= object->size && 3062 offidxstart < object->size) { 3063 size1 = object->size; 3064 object->size = offidxstart; 3065 if (object->cred != NULL) { 3066 size1 -= object->size; 3067 KASSERT(object->charge >= ptoa(size1), 3068 ("object %p charge < 0", object)); 3069 swap_release_by_cred(ptoa(size1), 3070 object->cred); 3071 object->charge -= ptoa(size1); 3072 } 3073 } 3074 } 3075 VM_OBJECT_WUNLOCK(object); 3076 } else 3077 entry->object.vm_object = NULL; 3078 if (map->system_map) 3079 vm_map_entry_deallocate(entry, TRUE); 3080 else { 3081 entry->next = curthread->td_map_def_user; 3082 curthread->td_map_def_user = entry; 3083 } 3084 } 3085 3086 /* 3087 * vm_map_delete: [ internal use only ] 3088 * 3089 * Deallocates the given address range from the target 3090 * map. 3091 */ 3092 int 3093 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3094 { 3095 vm_map_entry_t entry; 3096 vm_map_entry_t first_entry; 3097 3098 VM_MAP_ASSERT_LOCKED(map); 3099 if (start == end) 3100 return (KERN_SUCCESS); 3101 3102 /* 3103 * Find the start of the region, and clip it 3104 */ 3105 if (!vm_map_lookup_entry(map, start, &first_entry)) 3106 entry = first_entry->next; 3107 else { 3108 entry = first_entry; 3109 vm_map_clip_start(map, entry, start); 3110 } 3111 3112 /* 3113 * Step through all entries in this region 3114 */ 3115 while (entry->start < end) { 3116 vm_map_entry_t next; 3117 3118 /* 3119 * Wait for wiring or unwiring of an entry to complete. 3120 * Also wait for any system wirings to disappear on 3121 * user maps. 
3122 */ 3123 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3124 (vm_map_pmap(map) != kernel_pmap && 3125 vm_map_entry_system_wired_count(entry) != 0)) { 3126 unsigned int last_timestamp; 3127 vm_offset_t saved_start; 3128 vm_map_entry_t tmp_entry; 3129 3130 saved_start = entry->start; 3131 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3132 last_timestamp = map->timestamp; 3133 (void) vm_map_unlock_and_wait(map, 0); 3134 vm_map_lock(map); 3135 if (last_timestamp + 1 != map->timestamp) { 3136 /* 3137 * Look again for the entry because the map was 3138 * modified while it was unlocked. 3139 * Specifically, the entry may have been 3140 * clipped, merged, or deleted. 3141 */ 3142 if (!vm_map_lookup_entry(map, saved_start, 3143 &tmp_entry)) 3144 entry = tmp_entry->next; 3145 else { 3146 entry = tmp_entry; 3147 vm_map_clip_start(map, entry, 3148 saved_start); 3149 } 3150 } 3151 continue; 3152 } 3153 vm_map_clip_end(map, entry, end); 3154 3155 next = entry->next; 3156 3157 /* 3158 * Unwire before removing addresses from the pmap; otherwise, 3159 * unwiring will put the entries back in the pmap. 3160 */ 3161 if (entry->wired_count != 0) 3162 vm_map_entry_unwire(map, entry); 3163 3164 /* 3165 * Remove mappings for the pages, but only if the 3166 * mappings could exist. For instance, it does not 3167 * make sense to call pmap_remove() for guard entries. 3168 */ 3169 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 3170 entry->object.vm_object != NULL) 3171 pmap_remove(map->pmap, entry->start, entry->end); 3172 3173 /* 3174 * Delete the entry only after removing all pmap 3175 * entries pointing to its pages. (Otherwise, its 3176 * page frames may be reallocated, and any modify bits 3177 * will be set in the wrong object!) 3178 */ 3179 vm_map_entry_delete(map, entry); 3180 entry = next; 3181 } 3182 return (KERN_SUCCESS); 3183 } 3184 3185 /* 3186 * vm_map_remove: 3187 * 3188 * Remove the given address range from the target map. 3189 * This is the exported form of vm_map_delete. 3190 */ 3191 int 3192 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3193 { 3194 int result; 3195 3196 vm_map_lock(map); 3197 VM_MAP_RANGE_CHECK(map, start, end); 3198 result = vm_map_delete(map, start, end); 3199 vm_map_unlock(map); 3200 return (result); 3201 } 3202 3203 /* 3204 * vm_map_check_protection: 3205 * 3206 * Assert that the target map allows the specified privilege on the 3207 * entire address region given. The entire region must be allocated. 3208 * 3209 * WARNING! This code does not and should not check whether the 3210 * contents of the region is accessible. For example a smaller file 3211 * might be mapped into a larger address space. 3212 * 3213 * NOTE! This code is also called by munmap(). 3214 * 3215 * The map must be locked. A read lock is sufficient. 3216 */ 3217 boolean_t 3218 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3219 vm_prot_t protection) 3220 { 3221 vm_map_entry_t entry; 3222 vm_map_entry_t tmp_entry; 3223 3224 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3225 return (FALSE); 3226 entry = tmp_entry; 3227 3228 while (start < end) { 3229 /* 3230 * No holes allowed! 3231 */ 3232 if (start < entry->start) 3233 return (FALSE); 3234 /* 3235 * Check protection associated with entry. 
3236 */ 3237 if ((entry->protection & protection) != protection) 3238 return (FALSE); 3239 /* go to next entry */ 3240 start = entry->end; 3241 entry = entry->next; 3242 } 3243 return (TRUE); 3244 } 3245 3246 /* 3247 * vm_map_copy_entry: 3248 * 3249 * Copies the contents of the source entry to the destination 3250 * entry. The entries *must* be aligned properly. 3251 */ 3252 static void 3253 vm_map_copy_entry( 3254 vm_map_t src_map, 3255 vm_map_t dst_map, 3256 vm_map_entry_t src_entry, 3257 vm_map_entry_t dst_entry, 3258 vm_ooffset_t *fork_charge) 3259 { 3260 vm_object_t src_object; 3261 vm_map_entry_t fake_entry; 3262 vm_offset_t size; 3263 struct ucred *cred; 3264 int charged; 3265 3266 VM_MAP_ASSERT_LOCKED(dst_map); 3267 3268 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3269 return; 3270 3271 if (src_entry->wired_count == 0 || 3272 (src_entry->protection & VM_PROT_WRITE) == 0) { 3273 /* 3274 * If the source entry is marked needs_copy, it is already 3275 * write-protected. 3276 */ 3277 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3278 (src_entry->protection & VM_PROT_WRITE) != 0) { 3279 pmap_protect(src_map->pmap, 3280 src_entry->start, 3281 src_entry->end, 3282 src_entry->protection & ~VM_PROT_WRITE); 3283 } 3284 3285 /* 3286 * Make a copy of the object. 3287 */ 3288 size = src_entry->end - src_entry->start; 3289 if ((src_object = src_entry->object.vm_object) != NULL) { 3290 VM_OBJECT_WLOCK(src_object); 3291 charged = ENTRY_CHARGED(src_entry); 3292 if (src_object->handle == NULL && 3293 (src_object->type == OBJT_DEFAULT || 3294 src_object->type == OBJT_SWAP)) { 3295 vm_object_collapse(src_object); 3296 if ((src_object->flags & (OBJ_NOSPLIT | 3297 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3298 vm_object_split(src_entry); 3299 src_object = 3300 src_entry->object.vm_object; 3301 } 3302 } 3303 vm_object_reference_locked(src_object); 3304 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3305 if (src_entry->cred != NULL && 3306 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3307 KASSERT(src_object->cred == NULL, 3308 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3309 src_object)); 3310 src_object->cred = src_entry->cred; 3311 src_object->charge = size; 3312 } 3313 VM_OBJECT_WUNLOCK(src_object); 3314 dst_entry->object.vm_object = src_object; 3315 if (charged) { 3316 cred = curthread->td_ucred; 3317 crhold(cred); 3318 dst_entry->cred = cred; 3319 *fork_charge += size; 3320 if (!(src_entry->eflags & 3321 MAP_ENTRY_NEEDS_COPY)) { 3322 crhold(cred); 3323 src_entry->cred = cred; 3324 *fork_charge += size; 3325 } 3326 } 3327 src_entry->eflags |= MAP_ENTRY_COW | 3328 MAP_ENTRY_NEEDS_COPY; 3329 dst_entry->eflags |= MAP_ENTRY_COW | 3330 MAP_ENTRY_NEEDS_COPY; 3331 dst_entry->offset = src_entry->offset; 3332 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3333 /* 3334 * MAP_ENTRY_VN_WRITECNT cannot 3335 * indicate write reference from 3336 * src_entry, since the entry is 3337 * marked as needs copy. Allocate a 3338 * fake entry that is used to 3339 * decrement object->un_pager.vnp.writecount 3340 * at the appropriate time. Attach 3341 * fake_entry to the deferred list. 
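 * (The deferred list is drained by vm_map_process_deferred() once the
 * relevant map locks have been dropped.)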
3342 */ 3343 fake_entry = vm_map_entry_create(dst_map); 3344 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3345 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3346 vm_object_reference(src_object); 3347 fake_entry->object.vm_object = src_object; 3348 fake_entry->start = src_entry->start; 3349 fake_entry->end = src_entry->end; 3350 fake_entry->next = curthread->td_map_def_user; 3351 curthread->td_map_def_user = fake_entry; 3352 } 3353 3354 pmap_copy(dst_map->pmap, src_map->pmap, 3355 dst_entry->start, dst_entry->end - dst_entry->start, 3356 src_entry->start); 3357 } else { 3358 dst_entry->object.vm_object = NULL; 3359 dst_entry->offset = 0; 3360 if (src_entry->cred != NULL) { 3361 dst_entry->cred = curthread->td_ucred; 3362 crhold(dst_entry->cred); 3363 *fork_charge += size; 3364 } 3365 } 3366 } else { 3367 /* 3368 * We don't want to make writeable wired pages copy-on-write. 3369 * Immediately copy these pages into the new map by simulating 3370 * page faults. The new pages are pageable. 3371 */ 3372 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3373 fork_charge); 3374 } 3375 } 3376 3377 /* 3378 * vmspace_map_entry_forked: 3379 * Update the newly-forked vmspace each time a map entry is inherited 3380 * or copied. The values for vm_dsize and vm_tsize are approximate 3381 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3382 */ 3383 static void 3384 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3385 vm_map_entry_t entry) 3386 { 3387 vm_size_t entrysize; 3388 vm_offset_t newend; 3389 3390 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3391 return; 3392 entrysize = entry->end - entry->start; 3393 vm2->vm_map.size += entrysize; 3394 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3395 vm2->vm_ssize += btoc(entrysize); 3396 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3397 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3398 newend = MIN(entry->end, 3399 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3400 vm2->vm_dsize += btoc(newend - entry->start); 3401 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3402 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3403 newend = MIN(entry->end, 3404 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3405 vm2->vm_tsize += btoc(newend - entry->start); 3406 } 3407 } 3408 3409 /* 3410 * vmspace_fork: 3411 * Create a new process vmspace structure and vm_map 3412 * based on those of an existing process. The new map 3413 * is based on the old map, according to the inheritance 3414 * values on the regions in that map. 3415 * 3416 * XXX It might be worth coalescing the entries added to the new vmspace. 3417 * 3418 * The source map must not be locked. 3419 */ 3420 struct vmspace * 3421 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3422 { 3423 struct vmspace *vm2; 3424 vm_map_t new_map, old_map; 3425 vm_map_entry_t new_entry, old_entry; 3426 vm_object_t object; 3427 int locked; 3428 vm_inherit_t inh; 3429 3430 old_map = &vm1->vm_map; 3431 /* Copy immutable fields of vm1 to vm2. 
*/ 3432 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), 3433 pmap_pinit); 3434 if (vm2 == NULL) 3435 return (NULL); 3436 vm2->vm_taddr = vm1->vm_taddr; 3437 vm2->vm_daddr = vm1->vm_daddr; 3438 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3439 vm_map_lock(old_map); 3440 if (old_map->busy) 3441 vm_map_wait_busy(old_map); 3442 new_map = &vm2->vm_map; 3443 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3444 KASSERT(locked, ("vmspace_fork: lock failed")); 3445 3446 old_entry = old_map->header.next; 3447 3448 while (old_entry != &old_map->header) { 3449 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3450 panic("vm_map_fork: encountered a submap"); 3451 3452 inh = old_entry->inheritance; 3453 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3454 inh != VM_INHERIT_NONE) 3455 inh = VM_INHERIT_COPY; 3456 3457 switch (inh) { 3458 case VM_INHERIT_NONE: 3459 break; 3460 3461 case VM_INHERIT_SHARE: 3462 /* 3463 * Clone the entry, creating the shared object if necessary. 3464 */ 3465 object = old_entry->object.vm_object; 3466 if (object == NULL) { 3467 object = vm_object_allocate(OBJT_DEFAULT, 3468 atop(old_entry->end - old_entry->start)); 3469 old_entry->object.vm_object = object; 3470 old_entry->offset = 0; 3471 if (old_entry->cred != NULL) { 3472 object->cred = old_entry->cred; 3473 object->charge = old_entry->end - 3474 old_entry->start; 3475 old_entry->cred = NULL; 3476 } 3477 } 3478 3479 /* 3480 * Add the reference before calling vm_object_shadow 3481 * to insure that a shadow object is created. 3482 */ 3483 vm_object_reference(object); 3484 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3485 vm_object_shadow(&old_entry->object.vm_object, 3486 &old_entry->offset, 3487 old_entry->end - old_entry->start); 3488 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3489 /* Transfer the second reference too. */ 3490 vm_object_reference( 3491 old_entry->object.vm_object); 3492 3493 /* 3494 * As in vm_map_simplify_entry(), the 3495 * vnode lock will not be acquired in 3496 * this call to vm_object_deallocate(). 3497 */ 3498 vm_object_deallocate(object); 3499 object = old_entry->object.vm_object; 3500 } 3501 VM_OBJECT_WLOCK(object); 3502 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3503 if (old_entry->cred != NULL) { 3504 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3505 object->cred = old_entry->cred; 3506 object->charge = old_entry->end - old_entry->start; 3507 old_entry->cred = NULL; 3508 } 3509 3510 /* 3511 * Assert the correct state of the vnode 3512 * v_writecount while the object is locked, to 3513 * not relock it later for the assertion 3514 * correctness. 3515 */ 3516 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3517 object->type == OBJT_VNODE) { 3518 KASSERT(((struct vnode *)object->handle)-> 3519 v_writecount > 0, 3520 ("vmspace_fork: v_writecount %p", object)); 3521 KASSERT(object->un_pager.vnp.writemappings > 0, 3522 ("vmspace_fork: vnp.writecount %p", 3523 object)); 3524 } 3525 VM_OBJECT_WUNLOCK(object); 3526 3527 /* 3528 * Clone the entry, referencing the shared object. 
3529 */ 3530 new_entry = vm_map_entry_create(new_map); 3531 *new_entry = *old_entry; 3532 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3533 MAP_ENTRY_IN_TRANSITION); 3534 new_entry->wiring_thread = NULL; 3535 new_entry->wired_count = 0; 3536 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3537 vnode_pager_update_writecount(object, 3538 new_entry->start, new_entry->end); 3539 } 3540 3541 /* 3542 * Insert the entry into the new map -- we know we're 3543 * inserting at the end of the new map. 3544 */ 3545 vm_map_entry_link(new_map, new_map->header.prev, 3546 new_entry); 3547 vmspace_map_entry_forked(vm1, vm2, new_entry); 3548 3549 /* 3550 * Update the physical map 3551 */ 3552 pmap_copy(new_map->pmap, old_map->pmap, 3553 new_entry->start, 3554 (old_entry->end - old_entry->start), 3555 old_entry->start); 3556 break; 3557 3558 case VM_INHERIT_COPY: 3559 /* 3560 * Clone the entry and link into the map. 3561 */ 3562 new_entry = vm_map_entry_create(new_map); 3563 *new_entry = *old_entry; 3564 /* 3565 * Copied entry is COW over the old object. 3566 */ 3567 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3568 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3569 new_entry->wiring_thread = NULL; 3570 new_entry->wired_count = 0; 3571 new_entry->object.vm_object = NULL; 3572 new_entry->cred = NULL; 3573 vm_map_entry_link(new_map, new_map->header.prev, 3574 new_entry); 3575 vmspace_map_entry_forked(vm1, vm2, new_entry); 3576 vm_map_copy_entry(old_map, new_map, old_entry, 3577 new_entry, fork_charge); 3578 break; 3579 3580 case VM_INHERIT_ZERO: 3581 /* 3582 * Create a new anonymous mapping entry modelled from 3583 * the old one. 3584 */ 3585 new_entry = vm_map_entry_create(new_map); 3586 memset(new_entry, 0, sizeof(*new_entry)); 3587 3588 new_entry->start = old_entry->start; 3589 new_entry->end = old_entry->end; 3590 new_entry->eflags = old_entry->eflags & 3591 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3592 MAP_ENTRY_VN_WRITECNT); 3593 new_entry->protection = old_entry->protection; 3594 new_entry->max_protection = old_entry->max_protection; 3595 new_entry->inheritance = VM_INHERIT_ZERO; 3596 3597 vm_map_entry_link(new_map, new_map->header.prev, 3598 new_entry); 3599 vmspace_map_entry_forked(vm1, vm2, new_entry); 3600 3601 new_entry->cred = curthread->td_ucred; 3602 crhold(new_entry->cred); 3603 *fork_charge += (new_entry->end - new_entry->start); 3604 3605 break; 3606 } 3607 old_entry = old_entry->next; 3608 } 3609 /* 3610 * Use inlined vm_map_unlock() to postpone handling the deferred 3611 * map entries, which cannot be done until both old_map and 3612 * new_map locks are released. 3613 */ 3614 sx_xunlock(&old_map->lock); 3615 sx_xunlock(&new_map->lock); 3616 vm_map_process_deferred(); 3617 3618 return (vm2); 3619 } 3620 3621 /* 3622 * Create a process's stack for exec_new_vmspace(). This function is never 3623 * asked to wire the newly created stack. 3624 */ 3625 int 3626 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3627 vm_prot_t prot, vm_prot_t max, int cow) 3628 { 3629 vm_size_t growsize, init_ssize; 3630 rlim_t vmemlim; 3631 int rv; 3632 3633 MPASS((map->flags & MAP_WIREFUTURE) == 0); 3634 growsize = sgrowsiz; 3635 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 3636 vm_map_lock(map); 3637 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3638 /* If we would blow our VMEM resource limit, no go */ 3639 if (map->size + init_ssize > vmemlim) { 3640 rv = KERN_NO_SPACE; 3641 goto out; 3642 } 3643 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 3644 max, cow); 3645 out: 3646 vm_map_unlock(map); 3647 return (rv); 3648 } 3649 3650 static int stack_guard_page = 1; 3651 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 3652 &stack_guard_page, 0, 3653 "Specifies the number of guard pages for a stack that grows"); 3654 3655 static int 3656 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3657 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 3658 { 3659 vm_map_entry_t new_entry, prev_entry; 3660 vm_offset_t bot, gap_bot, gap_top, top; 3661 vm_size_t init_ssize, sgp; 3662 int orient, rv; 3663 3664 /* 3665 * The stack orientation is piggybacked with the cow argument. 3666 * Extract it into orient and mask the cow argument so that we 3667 * don't pass it around further. 3668 */ 3669 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 3670 KASSERT(orient != 0, ("No stack grow direction")); 3671 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 3672 ("bi-dir stack")); 3673 3674 if (addrbos < vm_map_min(map) || 3675 addrbos + max_ssize > vm_map_max(map) || 3676 addrbos + max_ssize <= addrbos) 3677 return (KERN_INVALID_ADDRESS); 3678 sgp = (vm_size_t)stack_guard_page * PAGE_SIZE; 3679 if (sgp >= max_ssize) 3680 return (KERN_INVALID_ARGUMENT); 3681 3682 init_ssize = growsize; 3683 if (max_ssize < init_ssize + sgp) 3684 init_ssize = max_ssize - sgp; 3685 3686 /* If addr is already mapped, no go */ 3687 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 3688 return (KERN_NO_SPACE); 3689 3690 /* 3691 * If we can't accommodate max_ssize in the current mapping, no go. 3692 */ 3693 if (prev_entry->next->start < addrbos + max_ssize) 3694 return (KERN_NO_SPACE); 3695 3696 /* 3697 * We initially map a stack of only init_ssize. We will grow as 3698 * needed later. Depending on the orientation of the stack (i.e. 3699 * the grow direction) we either map at the top of the range, the 3700 * bottom of the range or in the middle. 3701 * 3702 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3703 * and cow to be 0. Possibly we should eliminate these as input 3704 * parameters, and just pass these values here in the insert call. 3705 */ 3706 if (orient == MAP_STACK_GROWS_DOWN) { 3707 bot = addrbos + max_ssize - init_ssize; 3708 top = bot + init_ssize; 3709 gap_bot = addrbos; 3710 gap_top = bot; 3711 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 3712 bot = addrbos; 3713 top = bot + init_ssize; 3714 gap_bot = top; 3715 gap_top = addrbos + max_ssize; 3716 } 3717 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3718 if (rv != KERN_SUCCESS) 3719 return (rv); 3720 new_entry = prev_entry->next; 3721 KASSERT(new_entry->end == top || new_entry->start == bot, 3722 ("Bad entry start/end for new stack entry")); 3723 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3724 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3725 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3726 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3727 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3728 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3729 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 3730 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 
3731 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 3732 if (rv != KERN_SUCCESS) 3733 (void)vm_map_delete(map, bot, top); 3734 return (rv); 3735 } 3736 3737 /* 3738 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 3739 * successfully grow the stack. 3740 */ 3741 static int 3742 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 3743 { 3744 vm_map_entry_t stack_entry; 3745 struct proc *p; 3746 struct vmspace *vm; 3747 struct ucred *cred; 3748 vm_offset_t gap_end, gap_start, grow_start; 3749 size_t grow_amount, guard, max_grow; 3750 rlim_t lmemlim, stacklim, vmemlim; 3751 int rv, rv1; 3752 bool gap_deleted, grow_down, is_procstack; 3753 #ifdef notyet 3754 uint64_t limit; 3755 #endif 3756 #ifdef RACCT 3757 int error; 3758 #endif 3759 3760 p = curproc; 3761 vm = p->p_vmspace; 3762 3763 /* 3764 * Disallow stack growth when the access is performed by a 3765 * debugger or AIO daemon. The reason is that the wrong 3766 * resource limits are applied. 3767 */ 3768 if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL) 3769 return (KERN_FAILURE); 3770 3771 MPASS(!map->system_map); 3772 3773 guard = stack_guard_page * PAGE_SIZE; 3774 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 3775 stacklim = lim_cur(curthread, RLIMIT_STACK); 3776 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3777 retry: 3778 /* If addr is not in a hole for a stack grow area, no need to grow. */ 3779 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 3780 return (KERN_FAILURE); 3781 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 3782 return (KERN_SUCCESS); 3783 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 3784 stack_entry = gap_entry->next; 3785 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 3786 stack_entry->start != gap_entry->end) 3787 return (KERN_FAILURE); 3788 grow_amount = round_page(stack_entry->start - addr); 3789 grow_down = true; 3790 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 3791 stack_entry = gap_entry->prev; 3792 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 3793 stack_entry->end != gap_entry->start) 3794 return (KERN_FAILURE); 3795 grow_amount = round_page(addr + 1 - stack_entry->end); 3796 grow_down = false; 3797 } else { 3798 return (KERN_FAILURE); 3799 } 3800 max_grow = gap_entry->end - gap_entry->start; 3801 if (guard > max_grow) 3802 return (KERN_NO_SPACE); 3803 max_grow -= guard; 3804 if (grow_amount > max_grow) 3805 return (KERN_NO_SPACE); 3806 3807 /* 3808 * If this is the main process stack, see if we're over the stack 3809 * limit. 

        /*
         * If this is the main process stack, see if we're over the stack
         * limit.
         */
        is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
            addr < (vm_offset_t)p->p_sysent->sv_usrstack;
        if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
                return (KERN_NO_SPACE);

#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(p);
                if (is_procstack && racct_set(p, RACCT_STACK,
                    ctob(vm->vm_ssize) + grow_amount)) {
                        PROC_UNLOCK(p);
                        return (KERN_NO_SPACE);
                }
                PROC_UNLOCK(p);
        }
#endif

        grow_amount = roundup(grow_amount, sgrowsiz);
        if (grow_amount > max_grow)
                grow_amount = max_grow;
        if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
                grow_amount = trunc_page((vm_size_t)stacklim) -
                    ctob(vm->vm_ssize);
        }

#ifdef notyet
        PROC_LOCK(p);
        limit = racct_get_available(p, RACCT_STACK);
        PROC_UNLOCK(p);
        if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
                grow_amount = limit - ctob(vm->vm_ssize);
#endif

        if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
                if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
                        rv = KERN_NO_SPACE;
                        goto out;
                }
#ifdef RACCT
                if (racct_enable) {
                        PROC_LOCK(p);
                        if (racct_set(p, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
                                PROC_UNLOCK(p);
                                rv = KERN_NO_SPACE;
                                goto out;
                        }
                        PROC_UNLOCK(p);
                }
#endif
        }

        /* If we would blow our VMEM resource limit, no go */
        if (map->size + grow_amount > vmemlim) {
                rv = KERN_NO_SPACE;
                goto out;
        }
#ifdef RACCT
        if (racct_enable) {
                PROC_LOCK(p);
                if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
                        PROC_UNLOCK(p);
                        rv = KERN_NO_SPACE;
                        goto out;
                }
                PROC_UNLOCK(p);
        }
#endif

        if (vm_map_lock_upgrade(map)) {
                gap_entry = NULL;
                vm_map_lock_read(map);
                goto retry;
        }
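
        /*
         * The map is now exclusively locked (a failed try-upgrade above
         * drops the lock, so the lookup is redone via the retry label).
         * Carve the grown range out of the gap entry: for a grows-down
         * stack a new map entry is inserted below the existing stack
         * entry, while for a grows-up stack the existing entry and its
         * backing object are extended in place.
         */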

        if (grow_down) {
                grow_start = gap_entry->end - grow_amount;
                if (gap_entry->start + grow_amount == gap_entry->end) {
                        gap_start = gap_entry->start;
                        gap_end = gap_entry->end;
                        vm_map_entry_delete(map, gap_entry);
                        gap_deleted = true;
                } else {
                        MPASS(gap_entry->start < gap_entry->end - grow_amount);
                        gap_entry->end -= grow_amount;
                        vm_map_entry_resize_free(map, gap_entry);
                        gap_deleted = false;
                }
                rv = vm_map_insert(map, NULL, 0, grow_start,
                    grow_start + grow_amount,
                    stack_entry->protection, stack_entry->max_protection,
                    MAP_STACK_GROWS_DOWN);
                if (rv != KERN_SUCCESS) {
                        if (gap_deleted) {
                                rv1 = vm_map_insert(map, NULL, 0, gap_start,
                                    gap_end, VM_PROT_NONE, VM_PROT_NONE,
                                    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
                                MPASS(rv1 == KERN_SUCCESS);
                        } else {
                                gap_entry->end += grow_amount;
                                vm_map_entry_resize_free(map, gap_entry);
                        }
                }
        } else {
                grow_start = stack_entry->end;
                cred = stack_entry->cred;
                if (cred == NULL && stack_entry->object.vm_object != NULL)
                        cred = stack_entry->object.vm_object->cred;
                if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
                        rv = KERN_NO_SPACE;
                /* Grow the underlying object if applicable. */
                else if (stack_entry->object.vm_object == NULL ||
                    vm_object_coalesce(stack_entry->object.vm_object,
                    stack_entry->offset,
                    (vm_size_t)(stack_entry->end - stack_entry->start),
                    (vm_size_t)grow_amount, cred != NULL)) {
                        if (gap_entry->start + grow_amount == gap_entry->end)
                                vm_map_entry_delete(map, gap_entry);
                        else
                                gap_entry->start += grow_amount;
                        stack_entry->end += grow_amount;
                        map->size += grow_amount;
                        vm_map_entry_resize_free(map, stack_entry);
                        rv = KERN_SUCCESS;
                } else
                        rv = KERN_FAILURE;
        }
        if (rv == KERN_SUCCESS && is_procstack)
                vm->vm_ssize += btoc(grow_amount);

        /*
         * Heed the MAP_WIREFUTURE flag if it was set for this process.
         */
        if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
                vm_map_unlock(map);
                vm_map_wire(map, grow_start, grow_start + grow_amount,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
                vm_map_lock_read(map);
        } else
                vm_map_lock_downgrade(map);

out:
#ifdef RACCT
        if (racct_enable && rv != KERN_SUCCESS) {
                PROC_LOCK(p);
                error = racct_set(p, RACCT_VMEM, map->size);
                KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
                if (!old_mlock) {
                        error = racct_set(p, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)));
                        KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
                }
                error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
                KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
                PROC_UNLOCK(p);
        }
#endif

        return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace contains no
 * existing mappings.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;

        KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
            ("vmspace_exec recursed"));
        newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
        if (newvmspace == NULL)
                return (ENOMEM);
        newvmspace->vm_swrss = oldvmspace->vm_swrss;
        /*
         * This code is written like this for prototype purposes.  The
         * goal is to avoid running down the vmspace here, but to let the
         * other processes that are still using the vmspace finally run
         * it down.  Even though there is little or no chance of blocking
         * here, it is a good idea to keep this form for future mods.
         */
        PROC_VMSPACE_LOCK(p);
        p->p_vmspace = newvmspace;
        PROC_VMSPACE_UNLOCK(p);
        if (p == curthread->td_proc)
                pmap_activate(curthread);
        curthread->td_pflags |= TDP_EXECVMSPC;
        return (0);
}
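
/*
 * Note that vmspace_exec() intentionally does not free the old vmspace:
 * setting TDP_EXECVMSPC signals the execve() code that the swap took
 * place, and that caller is expected to drop the remaining reference to
 * the old vmspace once the new image is fully set up.
 */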

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
int
vmspace_unshare(struct proc *p)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;
        vm_ooffset_t fork_charge;

        if (oldvmspace->vm_refcnt == 1)
                return (0);
        fork_charge = 0;
        newvmspace = vmspace_fork(oldvmspace, &fork_charge);
        if (newvmspace == NULL)
                return (ENOMEM);
        if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
                vmspace_free(newvmspace);
                return (ENOMEM);
        }
        PROC_VMSPACE_LOCK(p);
        p->p_vmspace = newvmspace;
        PROC_VMSPACE_UNLOCK(p);
        if (p == curthread->td_proc)
                pmap_activate(curthread);
        vmspace_free(oldvmspace);
        return (0);
}

/*
 *      vm_map_lookup:
 *
 *      Finds the VM object, offset, and
 *      protection for a given virtual address in the
 *      specified map, assuming a page fault of the
 *      type specified.
 *
 *      Leaves the map in question locked for read; return
 *      values are guaranteed until a vm_map_lookup_done
 *      call is performed.  Note that the map argument
 *      is in/out; the returned map must be used in
 *      the call to vm_map_lookup_done.
 *
 *      A handle (out_entry) is returned for use in
 *      vm_map_lookup_done, to make that fast.
 *
 *      If a lookup is requested with "write protection"
 *      specified, the map may be changed to perform virtual
 *      copying operations, although the data referenced will
 *      remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
              vm_offset_t vaddr,
              vm_prot_t fault_typea,
              vm_map_entry_t *out_entry,        /* OUT */
              vm_object_t *object,              /* OUT */
              vm_pindex_t *pindex,              /* OUT */
              vm_prot_t *out_prot,              /* OUT */
              boolean_t *wired)                 /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;
        vm_object_t eobject;
        vm_size_t size;
        struct ucred *cred;

RetryLookup:

        vm_map_lock_read(map);

RetryLookupLocked:
        /*
         * Lookup the faulting address.
         */
        if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
                vm_map_unlock_read(map);
                return (KERN_INVALID_ADDRESS);
        }

        entry = *out_entry;

        /*
         * Handle submaps.
         */
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
                vm_map_t old_map = map;

                *var_map = map = entry->object.sub_map;
                vm_map_unlock_read(old_map);
                goto RetryLookup;
        }
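
        /*
         * The access check below also implements automatic stack growth:
         * when the lookup is performed on behalf of a page fault
         * (VM_PROT_FAULT_LOOKUP) and the address falls within a stack
         * guard gap entry, vm_map_growstack() is given a chance to
         * extend the adjacent stack and the lookup is retried.
         */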

        /*
         * Check whether this task is allowed to have this page.
         */
        prot = entry->protection;
        if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
                fault_typea &= ~VM_PROT_FAULT_LOOKUP;
                if (prot == VM_PROT_NONE && map != kernel_map &&
                    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
                    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
                    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
                    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
                        goto RetryLookupLocked;
        }
        fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
        if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
                vm_map_unlock_read(map);
                return (KERN_PROTECTION_FAILURE);
        }
        KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
            (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
            (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
            ("entry %p flags %x", entry, entry->eflags));
        if ((fault_typea & VM_PROT_COPY) != 0 &&
            (entry->max_protection & VM_PROT_WRITE) == 0 &&
            (entry->eflags & MAP_ENTRY_COW) == 0) {
                vm_map_unlock_read(map);
                return (KERN_PROTECTION_FAILURE);
        }

        /*
         * If this page is not pageable, we have to get it for all possible
         * accesses.
         */
        *wired = (entry->wired_count != 0);
        if (*wired)
                fault_type = entry->protection;
        size = entry->end - entry->start;
        /*
         * If the entry was copy-on-write, we either ...
         */
        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * If we want to write the page, we may as well handle that
                 * now since we've got the map locked.
                 *
                 * If we don't need to write the page, we just demote the
                 * permissions allowed.
                 */
                if ((fault_type & VM_PROT_WRITE) != 0 ||
                    (fault_typea & VM_PROT_COPY) != 0) {
                        /*
                         * Make a new object, and place it in the object
                         * chain.  Note that no new references have appeared
                         * -- one just moved from the map to the new
                         * object.
                         */
                        if (vm_map_lock_upgrade(map))
                                goto RetryLookup;

                        if (entry->cred == NULL) {
                                /*
                                 * The debugger owner is charged for
                                 * the memory.
                                 */
                                cred = curthread->td_ucred;
                                crhold(cred);
                                if (!swap_reserve_by_cred(size, cred)) {
                                        crfree(cred);
                                        vm_map_unlock(map);
                                        return (KERN_RESOURCE_SHORTAGE);
                                }
                                entry->cred = cred;
                        }
                        vm_object_shadow(&entry->object.vm_object,
                            &entry->offset, size);
                        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                        eobject = entry->object.vm_object;
                        if (eobject->cred != NULL) {
                                /*
                                 * The object was not shadowed.
                                 */
                                swap_release_by_cred(size, entry->cred);
                                crfree(entry->cred);
                                entry->cred = NULL;
                        } else if (entry->cred != NULL) {
                                VM_OBJECT_WLOCK(eobject);
                                eobject->cred = entry->cred;
                                eobject->charge = size;
                                VM_OBJECT_WUNLOCK(eobject);
                                entry->cred = NULL;
                        }

                        vm_map_lock_downgrade(map);
                } else {
                        /*
                         * We're attempting to read a copy-on-write page --
                         * don't allow writes.
                         */
                        prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == NULL &&
            !map->system_map) {
                if (vm_map_lock_upgrade(map))
                        goto RetryLookup;
                entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
                    atop(size));
                entry->offset = 0;
                if (entry->cred != NULL) {
                        VM_OBJECT_WLOCK(entry->object.vm_object);
                        entry->object.vm_object->cred = entry->cred;
                        entry->object.vm_object->charge = size;
                        VM_OBJECT_WUNLOCK(entry->object.vm_object);
                        entry->cred = NULL;
                }
                vm_map_lock_downgrade(map);
        }

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}

/*
 *      vm_map_lookup_locked:
 *
 *      Lookup the faulting address.  A version of vm_map_lookup that returns
 *      KERN_FAILURE instead of blocking on map lock or memory allocation.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
                     vm_offset_t vaddr,
                     vm_prot_t fault_typea,
                     vm_map_entry_t *out_entry, /* OUT */
                     vm_object_t *object,       /* OUT */
                     vm_pindex_t *pindex,       /* OUT */
                     vm_prot_t *out_prot,       /* OUT */
                     boolean_t *wired)          /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;

        /*
         * Lookup the faulting address.
         */
        if (!vm_map_lookup_entry(map, vaddr, out_entry))
                return (KERN_INVALID_ADDRESS);

        entry = *out_entry;

        /*
         * Fail if the entry refers to a submap.
         */
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
                return (KERN_FAILURE);

        /*
         * Check whether this task is allowed to have this page.
         */
        prot = entry->protection;
        fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
        if ((fault_type & prot) != fault_type)
                return (KERN_PROTECTION_FAILURE);

        /*
         * If this page is not pageable, we have to get it for all possible
         * accesses.
         */
        *wired = (entry->wired_count != 0);
        if (*wired)
                fault_type = entry->protection;

        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * Fail if the entry was copy-on-write for a write fault.
                 */
                if (fault_type & VM_PROT_WRITE)
                        return (KERN_FAILURE);
                /*
                 * We're attempting to read a copy-on-write page --
                 * don't allow writes.
                 */
                prot &= ~VM_PROT_WRITE;
        }

        /*
         * Fail if an object should be created.
         */
        if (entry->object.vm_object == NULL && !map->system_map)
                return (KERN_FAILURE);

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}
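
/*
 * A minimal sketch of the expected vm_map_lookup() caller pattern; the
 * variable names are illustrative only and not taken from the fault
 * handler:
 *
 *      vm_map_t map = &curproc->p_vmspace->vm_map;
 *      vm_map_entry_t entry;
 *      vm_object_t object;
 *      vm_pindex_t pindex;
 *      vm_prot_t prot;
 *      boolean_t wired;
 *      int rv;
 *
 *      rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *          &pindex, &prot, &wired);
 *      if (rv == KERN_SUCCESS) {
 *              ... fault in the page at (object, pindex) ...
 *              vm_map_lookup_done(map, entry);
 *      }
 *
 * The map pointer is passed by reference because a submap lookup may
 * replace it; the value returned in "map" is the one that must be handed
 * to vm_map_lookup_done(), which drops the read lock taken by the lookup.
 */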

/*
 *      vm_map_lookup_done:
 *
 *      Releases locks acquired by a vm_map_lookup
 *      (according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
        /*
         * Unlock the main-level map
         */
        vm_map_unlock_read(map);
}

/*
 * Out-of-line forms of the vm_map_max(), vm_map_min() and vm_map_pmap()
 * accessors, exported so that consumers such as kernel modules need not
 * depend on the layout of struct vm_map (KBI stability).
 */
vm_offset_t
vm_map_max_KBI(const struct vm_map *map)
{

        return (vm_map_max(map));
}

vm_offset_t
vm_map_min_KBI(const struct vm_map *map)
{

        return (vm_map_min(map));
}

pmap_t
vm_map_pmap_KBI(vm_map_t map)
{

        return (map->pmap);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 * Dump a map and its entries to the DDB console, recursing into submaps
 * and printing the backing objects.
 */
static void
vm_map_print(vm_map_t map)
{
        vm_map_entry_t entry;

        db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
            (void *)map,
            (void *)map->pmap, map->nentries, map->timestamp);

        db_indent += 2;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
                    (void *)entry, (void *)entry->start, (void *)entry->end,
                    entry->eflags);
                {
                        static char *inheritance_name[4] =
                            {"share", "copy", "none", "donate_copy"};

                        db_iprintf(" prot=%x/%x/%s",
                            entry->protection,
                            entry->max_protection,
                            inheritance_name[(int)(unsigned char)entry->inheritance]);
                        if (entry->wired_count != 0)
                                db_printf(", wired");
                }
                if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
                        db_printf(", share=%p, offset=0x%jx\n",
                            (void *)entry->object.sub_map,
                            (uintmax_t)entry->offset);
                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.sub_map !=
                            entry->object.sub_map)) {
                                db_indent += 2;
                                vm_map_print((vm_map_t)entry->object.sub_map);
                                db_indent -= 2;
                        }
                } else {
                        if (entry->cred != NULL)
                                db_printf(", ruid %d", entry->cred->cr_ruid);
                        db_printf(", object=%p, offset=0x%jx",
                            (void *)entry->object.vm_object,
                            (uintmax_t)entry->offset);
                        if (entry->object.vm_object && entry->object.vm_object->cred)
                                db_printf(", obj ruid %d charge %jx",
                                    entry->object.vm_object->cred->cr_ruid,
                                    (uintmax_t)entry->object.vm_object->charge);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
                        db_printf("\n");

                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.vm_object !=
                            entry->object.vm_object)) {
                                db_indent += 2;
                                vm_object_print((db_expr_t)(intptr_t)
                                    entry->object.vm_object,
                                    0, 0, (char *)0);
                                db_indent -= 2;
                        }
                }
        }
        db_indent -= 2;
}

/*
 * DDB command: "show map <addr>" prints the vm_map at the given address.
 */
DB_SHOW_COMMAND(map, map)
{

        if (!have_addr) {
                db_printf("usage: show map <addr>\n");
                return;
        }
        vm_map_print((vm_map_t)addr);
}

/*
 * DDB command: "show procvm [<addr>]" prints the vmspace, map, and pmap
 * of the given process, or of curproc when no address is supplied.
 */
DB_SHOW_COMMAND(procvm, procvm)
{
        struct proc *p;

        if (have_addr) {
                p = db_lookup_proc(addr);
        } else {
                p = curproc;
        }

        db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
            (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
            (void *)vmspace_pmap(p->p_vmspace));

        vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}

#endif /* DDB */