1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 35 * 36 * 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * Virtual memory mapping module. 
65 */ 66 67 #include <sys/cdefs.h> 68 __FBSDID("$FreeBSD$"); 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/kernel.h> 73 #include <sys/ktr.h> 74 #include <sys/lock.h> 75 #include <sys/mutex.h> 76 #include <sys/proc.h> 77 #include <sys/vmmeter.h> 78 #include <sys/mman.h> 79 #include <sys/vnode.h> 80 #include <sys/racct.h> 81 #include <sys/resourcevar.h> 82 #include <sys/rwlock.h> 83 #include <sys/file.h> 84 #include <sys/sysctl.h> 85 #include <sys/sysent.h> 86 #include <sys/shm.h> 87 88 #include <vm/vm.h> 89 #include <vm/vm_param.h> 90 #include <vm/pmap.h> 91 #include <vm/vm_map.h> 92 #include <vm/vm_page.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_pager.h> 95 #include <vm/vm_kern.h> 96 #include <vm/vm_extern.h> 97 #include <vm/vnode_pager.h> 98 #include <vm/swap_pager.h> 99 #include <vm/uma.h> 100 101 /* 102 * Virtual memory maps provide for the mapping, protection, 103 * and sharing of virtual memory objects. In addition, 104 * this module provides for an efficient virtual copy of 105 * memory from one map to another. 106 * 107 * Synchronization is required prior to most operations. 108 * 109 * Maps consist of an ordered doubly-linked list of simple 110 * entries; a self-adjusting binary search tree of these 111 * entries is used to speed up lookups. 112 * 113 * Since portions of maps are specified by start/end addresses, 114 * which may not align with existing map entries, all 115 * routines merely "clip" entries to these start/end values. 116 * [That is, an entry is split into two, bordering at a 117 * start or end value.] Note that these clippings may not 118 * always be necessary (as the two resulting entries are then 119 * not changed); however, the clipping is done for convenience. 120 * 121 * As mentioned above, virtual copy operations are performed 122 * by copying VM object references from one map to 123 * another, and then marking both regions as copy-on-write. 
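 *
 *	For example, removing the range [S, E) from a map whose single
 *	entry spans [A, B), with A < S and E < B, first clips that entry
 *	into [A, S), [S, E) and [E, B); only the middle entry is then
 *	unlinked and freed, and the other two are left unchanged.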
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static int vm_map_alignspace(vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
    vm_offset_t max_addr, vm_offset_t alignment);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
	((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
	!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
	{					\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
	}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
196 */ 197 198 void 199 vm_map_startup(void) 200 { 201 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 202 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, 203 #ifdef INVARIANTS 204 vm_map_zdtor, 205 #else 206 NULL, 207 #endif 208 vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 209 uma_prealloc(mapzone, MAX_KMAP); 210 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 211 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 212 UMA_ZONE_MTXCLASS | UMA_ZONE_VM); 213 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 214 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 215 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 216 #ifdef INVARIANTS 217 vmspace_zdtor, 218 #else 219 NULL, 220 #endif 221 vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 222 } 223 224 static int 225 vmspace_zinit(void *mem, int size, int flags) 226 { 227 struct vmspace *vm; 228 229 vm = (struct vmspace *)mem; 230 231 vm->vm_map.pmap = NULL; 232 (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); 233 PMAP_LOCK_INIT(vmspace_pmap(vm)); 234 return (0); 235 } 236 237 static int 238 vm_map_zinit(void *mem, int size, int flags) 239 { 240 vm_map_t map; 241 242 map = (vm_map_t)mem; 243 memset(map, 0, sizeof(*map)); 244 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK); 245 sx_init(&map->lock, "vm map (user)"); 246 return (0); 247 } 248 249 #ifdef INVARIANTS 250 static void 251 vmspace_zdtor(void *mem, int size, void *arg) 252 { 253 struct vmspace *vm; 254 255 vm = (struct vmspace *)mem; 256 257 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); 258 } 259 static void 260 vm_map_zdtor(void *mem, int size, void *arg) 261 { 262 vm_map_t map; 263 264 map = (vm_map_t)mem; 265 KASSERT(map->nentries == 0, 266 ("map %p nentries == %d on free.", 267 map, map->nentries)); 268 KASSERT(map->size == 0, 269 ("map %p size == %lu on free.", 270 map, (unsigned long)map->size)); 271 } 272 #endif /* INVARIANTS */ 273 274 /* 275 * Allocate a vmspace structure, including a vm_map and pmap, 276 * and initialize those structures. The refcnt is set to 1. 277 * 278 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit(). 279 */ 280 struct vmspace * 281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit) 282 { 283 struct vmspace *vm; 284 285 vm = uma_zalloc(vmspace_zone, M_WAITOK); 286 287 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); 288 289 if (pinit == NULL) 290 pinit = &pmap_pinit; 291 292 if (!pinit(vmspace_pmap(vm))) { 293 uma_zfree(vmspace_zone, vm); 294 return (NULL); 295 } 296 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 297 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); 298 vm->vm_refcnt = 1; 299 vm->vm_shm = NULL; 300 vm->vm_swrss = 0; 301 vm->vm_tsize = 0; 302 vm->vm_dsize = 0; 303 vm->vm_ssize = 0; 304 vm->vm_taddr = 0; 305 vm->vm_daddr = 0; 306 vm->vm_maxsaddr = 0; 307 return (vm); 308 } 309 310 #ifdef RACCT 311 static void 312 vmspace_container_reset(struct proc *p) 313 { 314 315 PROC_LOCK(p); 316 racct_set(p, RACCT_DATA, 0); 317 racct_set(p, RACCT_STACK, 0); 318 racct_set(p, RACCT_RSS, 0); 319 racct_set(p, RACCT_MEMLOCK, 0); 320 racct_set(p, RACCT_VMEM, 0); 321 PROC_UNLOCK(p); 322 } 323 #endif 324 325 static inline void 326 vmspace_dofree(struct vmspace *vm) 327 { 328 329 CTR1(KTR_VM, "vmspace_free: %p", vm); 330 331 /* 332 * Make sure any SysV shm is freed, it might not have been in 333 * exit1(). 
334 */ 335 shmexit(vm); 336 337 /* 338 * Lock the map, to wait out all other references to it. 339 * Delete all of the mappings and pages they hold, then call 340 * the pmap module to reclaim anything left. 341 */ 342 (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset, 343 vm->vm_map.max_offset); 344 345 pmap_release(vmspace_pmap(vm)); 346 vm->vm_map.pmap = NULL; 347 uma_zfree(vmspace_zone, vm); 348 } 349 350 void 351 vmspace_free(struct vmspace *vm) 352 { 353 354 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 355 "vmspace_free() called"); 356 357 if (vm->vm_refcnt == 0) 358 panic("vmspace_free: attempt to free already freed vmspace"); 359 360 if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) 361 vmspace_dofree(vm); 362 } 363 364 void 365 vmspace_exitfree(struct proc *p) 366 { 367 struct vmspace *vm; 368 369 PROC_VMSPACE_LOCK(p); 370 vm = p->p_vmspace; 371 p->p_vmspace = NULL; 372 PROC_VMSPACE_UNLOCK(p); 373 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 374 vmspace_free(vm); 375 } 376 377 void 378 vmspace_exit(struct thread *td) 379 { 380 int refcnt; 381 struct vmspace *vm; 382 struct proc *p; 383 384 /* 385 * Release user portion of address space. 386 * This releases references to vnodes, 387 * which could cause I/O if the file has been unlinked. 388 * Need to do this early enough that we can still sleep. 389 * 390 * The last exiting process to reach this point releases as 391 * much of the environment as it can. vmspace_dofree() is the 392 * slower fallback in case another process had a temporary 393 * reference to the vmspace. 394 */ 395 396 p = td->td_proc; 397 vm = p->p_vmspace; 398 atomic_add_int(&vmspace0.vm_refcnt, 1); 399 do { 400 refcnt = vm->vm_refcnt; 401 if (refcnt > 1 && p->p_vmspace != &vmspace0) { 402 /* Switch now since other proc might free vmspace */ 403 PROC_VMSPACE_LOCK(p); 404 p->p_vmspace = &vmspace0; 405 PROC_VMSPACE_UNLOCK(p); 406 pmap_activate(td); 407 } 408 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 409 if (refcnt == 1) { 410 if (p->p_vmspace != vm) { 411 /* vmspace not yet freed, switch back */ 412 PROC_VMSPACE_LOCK(p); 413 p->p_vmspace = vm; 414 PROC_VMSPACE_UNLOCK(p); 415 pmap_activate(td); 416 } 417 pmap_remove_pages(vmspace_pmap(vm)); 418 /* Switch now since this proc will free vmspace */ 419 PROC_VMSPACE_LOCK(p); 420 p->p_vmspace = &vmspace0; 421 PROC_VMSPACE_UNLOCK(p); 422 pmap_activate(td); 423 vmspace_dofree(vm); 424 } 425 #ifdef RACCT 426 if (racct_enable) 427 vmspace_container_reset(p); 428 #endif 429 } 430 431 /* Acquire reference to vmspace owned by another process. */ 432 433 struct vmspace * 434 vmspace_acquire_ref(struct proc *p) 435 { 436 struct vmspace *vm; 437 int refcnt; 438 439 PROC_VMSPACE_LOCK(p); 440 vm = p->p_vmspace; 441 if (vm == NULL) { 442 PROC_VMSPACE_UNLOCK(p); 443 return (NULL); 444 } 445 do { 446 refcnt = vm->vm_refcnt; 447 if (refcnt <= 0) { /* Avoid 0->1 transition */ 448 PROC_VMSPACE_UNLOCK(p); 449 return (NULL); 450 } 451 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1)); 452 if (vm != p->p_vmspace) { 453 PROC_VMSPACE_UNLOCK(p); 454 vmspace_free(vm); 455 return (NULL); 456 } 457 PROC_VMSPACE_UNLOCK(p); 458 return (vm); 459 } 460 461 /* 462 * Switch between vmspaces in an AIO kernel process. 463 * 464 * The AIO kernel processes switch to and from a user process's 465 * vmspace while performing an I/O operation on behalf of a user 466 * process. 
The new vmspace is either the vmspace of a user process 467 * obtained from an active AIO request or the initial vmspace of the 468 * AIO kernel process (when it is idling). Because user processes 469 * will block to drain any active AIO requests before proceeding in 470 * exit() or execve(), the vmspace reference count for these vmspaces 471 * can never be 0. This allows for a much simpler implementation than 472 * the loop in vmspace_acquire_ref() above. Similarly, AIO kernel 473 * processes hold an extra reference on their initial vmspace for the 474 * life of the process so that this guarantee is true for any vmspace 475 * passed as 'newvm'. 476 */ 477 void 478 vmspace_switch_aio(struct vmspace *newvm) 479 { 480 struct vmspace *oldvm; 481 482 /* XXX: Need some way to assert that this is an aio daemon. */ 483 484 KASSERT(newvm->vm_refcnt > 0, 485 ("vmspace_switch_aio: newvm unreferenced")); 486 487 oldvm = curproc->p_vmspace; 488 if (oldvm == newvm) 489 return; 490 491 /* 492 * Point to the new address space and refer to it. 493 */ 494 curproc->p_vmspace = newvm; 495 atomic_add_int(&newvm->vm_refcnt, 1); 496 497 /* Activate the new mapping. */ 498 pmap_activate(curthread); 499 500 /* Remove the daemon's reference to the old address space. */ 501 KASSERT(oldvm->vm_refcnt > 1, 502 ("vmspace_switch_aio: oldvm dropping last reference")); 503 vmspace_free(oldvm); 504 } 505 506 void 507 _vm_map_lock(vm_map_t map, const char *file, int line) 508 { 509 510 if (map->system_map) 511 mtx_lock_flags_(&map->system_mtx, 0, file, line); 512 else 513 sx_xlock_(&map->lock, file, line); 514 map->timestamp++; 515 } 516 517 static void 518 vm_map_process_deferred(void) 519 { 520 struct thread *td; 521 vm_map_entry_t entry, next; 522 vm_object_t object; 523 524 td = curthread; 525 entry = td->td_map_def_user; 526 td->td_map_def_user = NULL; 527 while (entry != NULL) { 528 next = entry->next; 529 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) { 530 /* 531 * Decrement the object's writemappings and 532 * possibly the vnode's v_writecount. 533 */ 534 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 535 ("Submap with writecount")); 536 object = entry->object.vm_object; 537 KASSERT(object != NULL, ("No object for writecount")); 538 vnode_pager_release_writecount(object, entry->start, 539 entry->end); 540 } 541 vm_map_entry_deallocate(entry, FALSE); 542 entry = next; 543 } 544 } 545 546 void 547 _vm_map_unlock(vm_map_t map, const char *file, int line) 548 { 549 550 if (map->system_map) 551 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 552 else { 553 sx_xunlock_(&map->lock, file, line); 554 vm_map_process_deferred(); 555 } 556 } 557 558 void 559 _vm_map_lock_read(vm_map_t map, const char *file, int line) 560 { 561 562 if (map->system_map) 563 mtx_lock_flags_(&map->system_mtx, 0, file, line); 564 else 565 sx_slock_(&map->lock, file, line); 566 } 567 568 void 569 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 570 { 571 572 if (map->system_map) 573 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 574 else { 575 sx_sunlock_(&map->lock, file, line); 576 vm_map_process_deferred(); 577 } 578 } 579 580 int 581 _vm_map_trylock(vm_map_t map, const char *file, int line) 582 { 583 int error; 584 585 error = map->system_map ? 
586 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 587 !sx_try_xlock_(&map->lock, file, line); 588 if (error == 0) 589 map->timestamp++; 590 return (error == 0); 591 } 592 593 int 594 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 595 { 596 int error; 597 598 error = map->system_map ? 599 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : 600 !sx_try_slock_(&map->lock, file, line); 601 return (error == 0); 602 } 603 604 /* 605 * _vm_map_lock_upgrade: [ internal use only ] 606 * 607 * Tries to upgrade a read (shared) lock on the specified map to a write 608 * (exclusive) lock. Returns the value "0" if the upgrade succeeds and a 609 * non-zero value if the upgrade fails. If the upgrade fails, the map is 610 * returned without a read or write lock held. 611 * 612 * Requires that the map be read locked. 613 */ 614 int 615 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 616 { 617 unsigned int last_timestamp; 618 619 if (map->system_map) { 620 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 621 } else { 622 if (!sx_try_upgrade_(&map->lock, file, line)) { 623 last_timestamp = map->timestamp; 624 sx_sunlock_(&map->lock, file, line); 625 vm_map_process_deferred(); 626 /* 627 * If the map's timestamp does not change while the 628 * map is unlocked, then the upgrade succeeds. 629 */ 630 sx_xlock_(&map->lock, file, line); 631 if (last_timestamp != map->timestamp) { 632 sx_xunlock_(&map->lock, file, line); 633 return (1); 634 } 635 } 636 } 637 map->timestamp++; 638 return (0); 639 } 640 641 void 642 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 643 { 644 645 if (map->system_map) { 646 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 647 } else 648 sx_downgrade_(&map->lock, file, line); 649 } 650 651 /* 652 * vm_map_locked: 653 * 654 * Returns a non-zero value if the caller holds a write (exclusive) lock 655 * on the specified map and the value "0" otherwise. 656 */ 657 int 658 vm_map_locked(vm_map_t map) 659 { 660 661 if (map->system_map) 662 return (mtx_owned(&map->system_mtx)); 663 else 664 return (sx_xlocked(&map->lock)); 665 } 666 667 #ifdef INVARIANTS 668 static void 669 _vm_map_assert_locked(vm_map_t map, const char *file, int line) 670 { 671 672 if (map->system_map) 673 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); 674 else 675 sx_assert_(&map->lock, SA_XLOCKED, file, line); 676 } 677 678 #define VM_MAP_ASSERT_LOCKED(map) \ 679 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) 680 #else 681 #define VM_MAP_ASSERT_LOCKED(map) 682 #endif 683 684 /* 685 * _vm_map_unlock_and_wait: 686 * 687 * Atomically releases the lock on the specified map and puts the calling 688 * thread to sleep. The calling thread will remain asleep until either 689 * vm_map_wakeup() is performed on the map or the specified timeout is 690 * exceeded. 691 * 692 * WARNING! This function does not perform deferred deallocations of 693 * objects and map entries. Therefore, the calling thread is expected to 694 * reacquire the map lock after reawakening and later perform an ordinary 695 * unlock operation, such as vm_map_unlock(), before completing its 696 * operation on the map. 
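 *
 *	A typical caller therefore follows a pattern roughly like the
 *	following sketch ("condition" is a placeholder; the vm_map_lock(),
 *	vm_map_unlock_and_wait() and vm_map_unlock() wrappers are assumed):
 *
 *		vm_map_lock(map);
 *		while (!condition(map)) {
 *			(void)vm_map_unlock_and_wait(map, 0);
 *			vm_map_lock(map);	-- wakes up unlocked; relock
 *		}
 *		... modify the map ...
 *		vm_map_unlock(map);	-- also runs deferred deallocations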
697 */ 698 int 699 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) 700 { 701 702 mtx_lock(&map_sleep_mtx); 703 if (map->system_map) 704 mtx_unlock_flags_(&map->system_mtx, 0, file, line); 705 else 706 sx_xunlock_(&map->lock, file, line); 707 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 708 timo)); 709 } 710 711 /* 712 * vm_map_wakeup: 713 * 714 * Awaken any threads that have slept on the map using 715 * vm_map_unlock_and_wait(). 716 */ 717 void 718 vm_map_wakeup(vm_map_t map) 719 { 720 721 /* 722 * Acquire and release map_sleep_mtx to prevent a wakeup() 723 * from being performed (and lost) between the map unlock 724 * and the msleep() in _vm_map_unlock_and_wait(). 725 */ 726 mtx_lock(&map_sleep_mtx); 727 mtx_unlock(&map_sleep_mtx); 728 wakeup(&map->root); 729 } 730 731 void 732 vm_map_busy(vm_map_t map) 733 { 734 735 VM_MAP_ASSERT_LOCKED(map); 736 map->busy++; 737 } 738 739 void 740 vm_map_unbusy(vm_map_t map) 741 { 742 743 VM_MAP_ASSERT_LOCKED(map); 744 KASSERT(map->busy, ("vm_map_unbusy: not busy")); 745 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { 746 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); 747 wakeup(&map->busy); 748 } 749 } 750 751 void 752 vm_map_wait_busy(vm_map_t map) 753 { 754 755 VM_MAP_ASSERT_LOCKED(map); 756 while (map->busy) { 757 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); 758 if (map->system_map) 759 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); 760 else 761 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); 762 } 763 map->timestamp++; 764 } 765 766 long 767 vmspace_resident_count(struct vmspace *vmspace) 768 { 769 return pmap_resident_count(vmspace_pmap(vmspace)); 770 } 771 772 /* 773 * vm_map_create: 774 * 775 * Creates and returns a new empty VM map with 776 * the given physical map structure, and having 777 * the given lower and upper address bounds. 778 */ 779 vm_map_t 780 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 781 { 782 vm_map_t result; 783 784 result = uma_zalloc(mapzone, M_WAITOK); 785 CTR1(KTR_VM, "vm_map_create: %p", result); 786 _vm_map_init(result, pmap, min, max); 787 return (result); 788 } 789 790 /* 791 * Initialize an existing vm_map structure 792 * such as that in the vmspace structure. 793 */ 794 static void 795 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 796 { 797 798 map->header.next = map->header.prev = &map->header; 799 map->needs_wakeup = FALSE; 800 map->system_map = 0; 801 map->pmap = pmap; 802 map->min_offset = min; 803 map->max_offset = max; 804 map->flags = 0; 805 map->root = NULL; 806 map->timestamp = 0; 807 map->busy = 0; 808 } 809 810 void 811 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) 812 { 813 814 _vm_map_init(map, pmap, min, max); 815 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 816 sx_init(&map->lock, "user map"); 817 } 818 819 /* 820 * vm_map_entry_dispose: [ internal use only ] 821 * 822 * Inverse of vm_map_entry_create. 823 */ 824 static void 825 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 826 { 827 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 828 } 829 830 /* 831 * vm_map_entry_create: [ internal use only ] 832 * 833 * Allocates a VM map entry for insertion. 834 * No entry fields are filled in. 
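 *
 *	System maps take their entries from the dedicated kmapentzone with
 *	M_NOWAIT: the caller already holds the (mutex-protected) system map
 *	lock, so it must not sleep, and kernel map entries cannot come from
 *	the general-purpose pool without risking recursion (see the
 *	vm_map_startup() comment above).  A failed allocation here is
 *	treated as fatal.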
835 */ 836 static vm_map_entry_t 837 vm_map_entry_create(vm_map_t map) 838 { 839 vm_map_entry_t new_entry; 840 841 if (map->system_map) 842 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 843 else 844 new_entry = uma_zalloc(mapentzone, M_WAITOK); 845 if (new_entry == NULL) 846 panic("vm_map_entry_create: kernel resources exhausted"); 847 return (new_entry); 848 } 849 850 /* 851 * vm_map_entry_set_behavior: 852 * 853 * Set the expected access behavior, either normal, random, or 854 * sequential. 855 */ 856 static inline void 857 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 858 { 859 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 860 (behavior & MAP_ENTRY_BEHAV_MASK); 861 } 862 863 /* 864 * vm_map_entry_set_max_free: 865 * 866 * Set the max_free field in a vm_map_entry. 867 */ 868 static inline void 869 vm_map_entry_set_max_free(vm_map_entry_t entry) 870 { 871 872 entry->max_free = entry->adj_free; 873 if (entry->left != NULL && entry->left->max_free > entry->max_free) 874 entry->max_free = entry->left->max_free; 875 if (entry->right != NULL && entry->right->max_free > entry->max_free) 876 entry->max_free = entry->right->max_free; 877 } 878 879 /* 880 * vm_map_entry_splay: 881 * 882 * The Sleator and Tarjan top-down splay algorithm with the 883 * following variation. Max_free must be computed bottom-up, so 884 * on the downward pass, maintain the left and right spines in 885 * reverse order. Then, make a second pass up each side to fix 886 * the pointers and compute max_free. The time bound is O(log n) 887 * amortized. 888 * 889 * The new root is the vm_map_entry containing "addr", or else an 890 * adjacent entry (lower or higher) if addr is not in the tree. 891 * 892 * The map must be locked, and leaves it so. 893 * 894 * Returns: the new root. 895 */ 896 static vm_map_entry_t 897 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root) 898 { 899 vm_map_entry_t llist, rlist; 900 vm_map_entry_t ltree, rtree; 901 vm_map_entry_t y; 902 903 /* Special case of empty tree. */ 904 if (root == NULL) 905 return (root); 906 907 /* 908 * Pass One: Splay down the tree until we find addr or a NULL 909 * pointer where addr would go. llist and rlist are the two 910 * sides in reverse order (bottom-up), with llist linked by 911 * the right pointer and rlist linked by the left pointer in 912 * the vm_map_entry. Wait until Pass Two to set max_free on 913 * the two spines. 914 */ 915 llist = NULL; 916 rlist = NULL; 917 for (;;) { 918 /* root is never NULL in here. */ 919 if (addr < root->start) { 920 y = root->left; 921 if (y == NULL) 922 break; 923 if (addr < y->start && y->left != NULL) { 924 /* Rotate right and put y on rlist. */ 925 root->left = y->right; 926 y->right = root; 927 vm_map_entry_set_max_free(root); 928 root = y->left; 929 y->left = rlist; 930 rlist = y; 931 } else { 932 /* Put root on rlist. */ 933 root->left = rlist; 934 rlist = root; 935 root = y; 936 } 937 } else if (addr >= root->end) { 938 y = root->right; 939 if (y == NULL) 940 break; 941 if (addr >= y->end && y->right != NULL) { 942 /* Rotate left and put y on llist. */ 943 root->right = y->left; 944 y->left = root; 945 vm_map_entry_set_max_free(root); 946 root = y->right; 947 y->right = llist; 948 llist = y; 949 } else { 950 /* Put root on llist. */ 951 root->right = llist; 952 llist = root; 953 root = y; 954 } 955 } else 956 break; 957 } 958 959 /* 960 * Pass Two: Walk back up the two spines, flip the pointers 961 * and set max_free. The subtrees of the root go at the 962 * bottom of llist and rlist. 
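 *	The spines are walked from their deepest node back toward the old
 *	root, so each node's max_free is recomputed only after its
 *	re-attached subtree is complete (bottom-up, as required).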
963 */ 964 ltree = root->left; 965 while (llist != NULL) { 966 y = llist->right; 967 llist->right = ltree; 968 vm_map_entry_set_max_free(llist); 969 ltree = llist; 970 llist = y; 971 } 972 rtree = root->right; 973 while (rlist != NULL) { 974 y = rlist->left; 975 rlist->left = rtree; 976 vm_map_entry_set_max_free(rlist); 977 rtree = rlist; 978 rlist = y; 979 } 980 981 /* 982 * Final assembly: add ltree and rtree as subtrees of root. 983 */ 984 root->left = ltree; 985 root->right = rtree; 986 vm_map_entry_set_max_free(root); 987 988 return (root); 989 } 990 991 /* 992 * vm_map_entry_{un,}link: 993 * 994 * Insert/remove entries from maps. 995 */ 996 static void 997 vm_map_entry_link(vm_map_t map, 998 vm_map_entry_t after_where, 999 vm_map_entry_t entry) 1000 { 1001 1002 CTR4(KTR_VM, 1003 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 1004 map->nentries, entry, after_where); 1005 VM_MAP_ASSERT_LOCKED(map); 1006 KASSERT(after_where->end <= entry->start, 1007 ("vm_map_entry_link: prev end %jx new start %jx overlap", 1008 (uintmax_t)after_where->end, (uintmax_t)entry->start)); 1009 KASSERT(entry->end <= after_where->next->start, 1010 ("vm_map_entry_link: new end %jx next start %jx overlap", 1011 (uintmax_t)entry->end, (uintmax_t)after_where->next->start)); 1012 1013 map->nentries++; 1014 entry->prev = after_where; 1015 entry->next = after_where->next; 1016 entry->next->prev = entry; 1017 after_where->next = entry; 1018 1019 if (after_where != &map->header) { 1020 if (after_where != map->root) 1021 vm_map_entry_splay(after_where->start, map->root); 1022 entry->right = after_where->right; 1023 entry->left = after_where; 1024 after_where->right = NULL; 1025 after_where->adj_free = entry->start - after_where->end; 1026 vm_map_entry_set_max_free(after_where); 1027 } else { 1028 entry->right = map->root; 1029 entry->left = NULL; 1030 } 1031 entry->adj_free = entry->next->start - entry->end; 1032 vm_map_entry_set_max_free(entry); 1033 map->root = entry; 1034 } 1035 1036 static void 1037 vm_map_entry_unlink(vm_map_t map, 1038 vm_map_entry_t entry) 1039 { 1040 vm_map_entry_t next, prev, root; 1041 1042 VM_MAP_ASSERT_LOCKED(map); 1043 if (entry != map->root) 1044 vm_map_entry_splay(entry->start, map->root); 1045 if (entry->left == NULL) 1046 root = entry->right; 1047 else { 1048 root = vm_map_entry_splay(entry->start, entry->left); 1049 root->right = entry->right; 1050 root->adj_free = entry->next->start - root->end; 1051 vm_map_entry_set_max_free(root); 1052 } 1053 map->root = root; 1054 1055 prev = entry->prev; 1056 next = entry->next; 1057 next->prev = prev; 1058 prev->next = next; 1059 map->nentries--; 1060 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 1061 map->nentries, entry); 1062 } 1063 1064 /* 1065 * vm_map_entry_resize_free: 1066 * 1067 * Recompute the amount of free space following a vm_map_entry 1068 * and propagate that value up the tree. Call this function after 1069 * resizing a map entry in-place, that is, without a call to 1070 * vm_map_entry_link() or _unlink(). 1071 * 1072 * The map must be locked, and leaves it so. 1073 */ 1074 static void 1075 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry) 1076 { 1077 1078 /* 1079 * Using splay trees without parent pointers, propagating 1080 * max_free up the tree is done by moving the entry to the 1081 * root and making the change there. 
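 *
 *	For example, after vm_map_insert() extends prev_entry->end in
 *	place, the free gap following prev_entry shrinks.  Splaying the
 *	entry to the root lets adj_free be recomputed from its successor
 *	and max_free from its children, and leaves no ancestors whose
 *	max_free would also need updating.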
1082 */ 1083 if (entry != map->root) 1084 map->root = vm_map_entry_splay(entry->start, map->root); 1085 1086 entry->adj_free = entry->next->start - entry->end; 1087 vm_map_entry_set_max_free(entry); 1088 } 1089 1090 /* 1091 * vm_map_lookup_entry: [ internal use only ] 1092 * 1093 * Finds the map entry containing (or 1094 * immediately preceding) the specified address 1095 * in the given map; the entry is returned 1096 * in the "entry" parameter. The boolean 1097 * result indicates whether the address is 1098 * actually contained in the map. 1099 */ 1100 boolean_t 1101 vm_map_lookup_entry( 1102 vm_map_t map, 1103 vm_offset_t address, 1104 vm_map_entry_t *entry) /* OUT */ 1105 { 1106 vm_map_entry_t cur; 1107 boolean_t locked; 1108 1109 /* 1110 * If the map is empty, then the map entry immediately preceding 1111 * "address" is the map's header. 1112 */ 1113 cur = map->root; 1114 if (cur == NULL) 1115 *entry = &map->header; 1116 else if (address >= cur->start && cur->end > address) { 1117 *entry = cur; 1118 return (TRUE); 1119 } else if ((locked = vm_map_locked(map)) || 1120 sx_try_upgrade(&map->lock)) { 1121 /* 1122 * Splay requires a write lock on the map. However, it only 1123 * restructures the binary search tree; it does not otherwise 1124 * change the map. Thus, the map's timestamp need not change 1125 * on a temporary upgrade. 1126 */ 1127 map->root = cur = vm_map_entry_splay(address, cur); 1128 if (!locked) 1129 sx_downgrade(&map->lock); 1130 1131 /* 1132 * If "address" is contained within a map entry, the new root 1133 * is that map entry. Otherwise, the new root is a map entry 1134 * immediately before or after "address". 1135 */ 1136 if (address >= cur->start) { 1137 *entry = cur; 1138 if (cur->end > address) 1139 return (TRUE); 1140 } else 1141 *entry = cur->prev; 1142 } else 1143 /* 1144 * Since the map is only locked for read access, perform a 1145 * standard binary search tree lookup for "address". 1146 */ 1147 for (;;) { 1148 if (address < cur->start) { 1149 if (cur->left == NULL) { 1150 *entry = cur->prev; 1151 break; 1152 } 1153 cur = cur->left; 1154 } else if (cur->end > address) { 1155 *entry = cur; 1156 return (TRUE); 1157 } else { 1158 if (cur->right == NULL) { 1159 *entry = cur; 1160 break; 1161 } 1162 cur = cur->right; 1163 } 1164 } 1165 return (FALSE); 1166 } 1167 1168 /* 1169 * vm_map_insert: 1170 * 1171 * Inserts the given whole VM object into the target 1172 * map at the specified address range. The object's 1173 * size should match that of the address range. 1174 * 1175 * Requires that the map be locked, and leaves it so. 1176 * 1177 * If object is non-NULL, ref count must be bumped by caller 1178 * prior to making call to account for the new entry. 1179 */ 1180 int 1181 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1182 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) 1183 { 1184 vm_map_entry_t new_entry, prev_entry, temp_entry; 1185 struct ucred *cred; 1186 vm_eflags_t protoeflags; 1187 vm_inherit_t inheritance; 1188 1189 VM_MAP_ASSERT_LOCKED(map); 1190 KASSERT(object != kernel_object || 1191 (cow & MAP_COPY_ON_WRITE) == 0, 1192 ("vm_map_insert: kernel object and COW")); 1193 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0, 1194 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 1195 KASSERT((prot & ~max) == 0, 1196 ("prot %#x is not subset of max_prot %#x", prot, max)); 1197 1198 /* 1199 * Check that the start and end points are not bogus. 
1200 */ 1201 if (start < map->min_offset || end > map->max_offset || start >= end) 1202 return (KERN_INVALID_ADDRESS); 1203 1204 /* 1205 * Find the entry prior to the proposed starting address; if it's part 1206 * of an existing entry, this range is bogus. 1207 */ 1208 if (vm_map_lookup_entry(map, start, &temp_entry)) 1209 return (KERN_NO_SPACE); 1210 1211 prev_entry = temp_entry; 1212 1213 /* 1214 * Assert that the next entry doesn't overlap the end point. 1215 */ 1216 if (prev_entry->next->start < end) 1217 return (KERN_NO_SPACE); 1218 1219 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL || 1220 max != VM_PROT_NONE)) 1221 return (KERN_INVALID_ARGUMENT); 1222 1223 protoeflags = 0; 1224 if (cow & MAP_COPY_ON_WRITE) 1225 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; 1226 if (cow & MAP_NOFAULT) 1227 protoeflags |= MAP_ENTRY_NOFAULT; 1228 if (cow & MAP_DISABLE_SYNCER) 1229 protoeflags |= MAP_ENTRY_NOSYNC; 1230 if (cow & MAP_DISABLE_COREDUMP) 1231 protoeflags |= MAP_ENTRY_NOCOREDUMP; 1232 if (cow & MAP_STACK_GROWS_DOWN) 1233 protoeflags |= MAP_ENTRY_GROWS_DOWN; 1234 if (cow & MAP_STACK_GROWS_UP) 1235 protoeflags |= MAP_ENTRY_GROWS_UP; 1236 if (cow & MAP_VN_WRITECOUNT) 1237 protoeflags |= MAP_ENTRY_VN_WRITECNT; 1238 if ((cow & MAP_CREATE_GUARD) != 0) 1239 protoeflags |= MAP_ENTRY_GUARD; 1240 if ((cow & MAP_CREATE_STACK_GAP_DN) != 0) 1241 protoeflags |= MAP_ENTRY_STACK_GAP_DN; 1242 if ((cow & MAP_CREATE_STACK_GAP_UP) != 0) 1243 protoeflags |= MAP_ENTRY_STACK_GAP_UP; 1244 if (cow & MAP_INHERIT_SHARE) 1245 inheritance = VM_INHERIT_SHARE; 1246 else 1247 inheritance = VM_INHERIT_DEFAULT; 1248 1249 cred = NULL; 1250 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0) 1251 goto charged; 1252 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && 1253 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { 1254 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) 1255 return (KERN_RESOURCE_SHORTAGE); 1256 KASSERT(object == NULL || 1257 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 || 1258 object->cred == NULL, 1259 ("overcommit: vm_map_insert o %p", object)); 1260 cred = curthread->td_ucred; 1261 } 1262 1263 charged: 1264 /* Expand the kernel pmap, if necessary. */ 1265 if (map == kernel_map && end > kernel_vm_end) 1266 pmap_growkernel(end); 1267 if (object != NULL) { 1268 /* 1269 * OBJ_ONEMAPPING must be cleared unless this mapping 1270 * is trivially proven to be the only mapping for any 1271 * of the object's pages. (Object granularity 1272 * reference counting is insufficient to recognize 1273 * aliases with precision.) 1274 */ 1275 VM_OBJECT_WLOCK(object); 1276 if (object->ref_count > 1 || object->shadow_count != 0) 1277 vm_object_clear_flag(object, OBJ_ONEMAPPING); 1278 VM_OBJECT_WUNLOCK(object); 1279 } else if (prev_entry != &map->header && 1280 prev_entry->eflags == protoeflags && 1281 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 && 1282 prev_entry->end == start && prev_entry->wired_count == 0 && 1283 (prev_entry->cred == cred || 1284 (prev_entry->object.vm_object != NULL && 1285 prev_entry->object.vm_object->cred == cred)) && 1286 vm_object_coalesce(prev_entry->object.vm_object, 1287 prev_entry->offset, 1288 (vm_size_t)(prev_entry->end - prev_entry->start), 1289 (vm_size_t)(end - prev_entry->end), cred != NULL && 1290 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) { 1291 /* 1292 * We were able to extend the object. Determine if we 1293 * can extend the previous map entry to include the 1294 * new range as well. 
1295 */ 1296 if (prev_entry->inheritance == inheritance && 1297 prev_entry->protection == prot && 1298 prev_entry->max_protection == max) { 1299 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) 1300 map->size += end - prev_entry->end; 1301 prev_entry->end = end; 1302 vm_map_entry_resize_free(map, prev_entry); 1303 vm_map_simplify_entry(map, prev_entry); 1304 return (KERN_SUCCESS); 1305 } 1306 1307 /* 1308 * If we can extend the object but cannot extend the 1309 * map entry, we have to create a new map entry. We 1310 * must bump the ref count on the extended object to 1311 * account for it. object may be NULL. 1312 */ 1313 object = prev_entry->object.vm_object; 1314 offset = prev_entry->offset + 1315 (prev_entry->end - prev_entry->start); 1316 vm_object_reference(object); 1317 if (cred != NULL && object != NULL && object->cred != NULL && 1318 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 1319 /* Object already accounts for this uid. */ 1320 cred = NULL; 1321 } 1322 } 1323 if (cred != NULL) 1324 crhold(cred); 1325 1326 /* 1327 * Create a new entry 1328 */ 1329 new_entry = vm_map_entry_create(map); 1330 new_entry->start = start; 1331 new_entry->end = end; 1332 new_entry->cred = NULL; 1333 1334 new_entry->eflags = protoeflags; 1335 new_entry->object.vm_object = object; 1336 new_entry->offset = offset; 1337 1338 new_entry->inheritance = inheritance; 1339 new_entry->protection = prot; 1340 new_entry->max_protection = max; 1341 new_entry->wired_count = 0; 1342 new_entry->wiring_thread = NULL; 1343 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; 1344 new_entry->next_read = start; 1345 1346 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), 1347 ("overcommit: vm_map_insert leaks vm_map %p", new_entry)); 1348 new_entry->cred = cred; 1349 1350 /* 1351 * Insert the new entry into the list 1352 */ 1353 vm_map_entry_link(map, prev_entry, new_entry); 1354 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) 1355 map->size += new_entry->end - new_entry->start; 1356 1357 /* 1358 * Try to coalesce the new entry with both the previous and next 1359 * entries in the list. Previously, we only attempted to coalesce 1360 * with the previous entry when object is NULL. Here, we handle the 1361 * other cases, which are less common. 1362 */ 1363 vm_map_simplify_entry(map, new_entry); 1364 1365 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) { 1366 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), 1367 end - start, cow & MAP_PREFAULT_PARTIAL); 1368 } 1369 1370 return (KERN_SUCCESS); 1371 } 1372 1373 /* 1374 * vm_map_findspace: 1375 * 1376 * Find the first fit (lowest VM address) for "length" free bytes 1377 * beginning at address >= start in the given map. 1378 * 1379 * In a vm_map_entry, "adj_free" is the amount of free space 1380 * adjacent (higher address) to this entry, and "max_free" is the 1381 * maximum amount of contiguous free space in its subtree. This 1382 * allows finding a free region in one path down the tree, so 1383 * O(log n) amortized with splay trees. 1384 * 1385 * The map must be locked, and leaves it so. 1386 * 1387 * Returns: 0 on success, and starting address in *addr, 1388 * 1 if insufficient space. 1389 */ 1390 int 1391 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1392 vm_offset_t *addr) /* OUT */ 1393 { 1394 vm_map_entry_t entry; 1395 vm_offset_t st; 1396 1397 /* 1398 * Request must fit within min/max VM address and must avoid 1399 * address wrap. 
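 *	(For instance, on a 32-bit map a request with start 0xfffff000 and
 *	length 0x2000 would wrap: start + length == 0x1000 < start, so it
 *	is rejected below.)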
1400 */ 1401 if (start < map->min_offset) 1402 start = map->min_offset; 1403 if (start + length > map->max_offset || start + length < start) 1404 return (1); 1405 1406 /* Empty tree means wide open address space. */ 1407 if (map->root == NULL) { 1408 *addr = start; 1409 return (0); 1410 } 1411 1412 /* 1413 * After splay, if start comes before root node, then there 1414 * must be a gap from start to the root. 1415 */ 1416 map->root = vm_map_entry_splay(start, map->root); 1417 if (start + length <= map->root->start) { 1418 *addr = start; 1419 return (0); 1420 } 1421 1422 /* 1423 * Root is the last node that might begin its gap before 1424 * start, and this is the last comparison where address 1425 * wrap might be a problem. 1426 */ 1427 st = (start > map->root->end) ? start : map->root->end; 1428 if (length <= map->root->end + map->root->adj_free - st) { 1429 *addr = st; 1430 return (0); 1431 } 1432 1433 /* With max_free, can immediately tell if no solution. */ 1434 entry = map->root->right; 1435 if (entry == NULL || length > entry->max_free) 1436 return (1); 1437 1438 /* 1439 * Search the right subtree in the order: left subtree, root, 1440 * right subtree (first fit). The previous splay implies that 1441 * all regions in the right subtree have addresses > start. 1442 */ 1443 while (entry != NULL) { 1444 if (entry->left != NULL && entry->left->max_free >= length) 1445 entry = entry->left; 1446 else if (entry->adj_free >= length) { 1447 *addr = entry->end; 1448 return (0); 1449 } else 1450 entry = entry->right; 1451 } 1452 1453 /* Can't get here, so panic if we do. */ 1454 panic("vm_map_findspace: max_free corrupt"); 1455 } 1456 1457 int 1458 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1459 vm_offset_t start, vm_size_t length, vm_prot_t prot, 1460 vm_prot_t max, int cow) 1461 { 1462 vm_offset_t end; 1463 int result; 1464 1465 end = start + length; 1466 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1467 object == NULL, 1468 ("vm_map_fixed: non-NULL backing object for stack")); 1469 vm_map_lock(map); 1470 VM_MAP_RANGE_CHECK(map, start, end); 1471 if ((cow & MAP_CHECK_EXCL) == 0) 1472 vm_map_delete(map, start, end); 1473 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1474 result = vm_map_stack_locked(map, start, length, sgrowsiz, 1475 prot, max, cow); 1476 } else { 1477 result = vm_map_insert(map, object, offset, start, end, 1478 prot, max, cow); 1479 } 1480 vm_map_unlock(map); 1481 return (result); 1482 } 1483 1484 /* 1485 * Searches for the specified amount of free space in the given map with the 1486 * specified alignment. Performs an address-ordered, first-fit search from 1487 * the given address "*addr", with an optional upper bound "max_addr". If the 1488 * parameter "alignment" is zero, then the alignment is computed from the 1489 * given (object, offset) pair so as to enable the greatest possible use of 1490 * superpage mappings. Returns KERN_SUCCESS and the address of the free space 1491 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE. 1492 * 1493 * The map must be locked. Initially, there must be at least "length" bytes 1494 * of free space at the given address. 
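 *
 *	As an illustration, suppose *addr is 0x10010 and alignment is
 *	0x10000: the loop below rounds *addr up to 0x20000 and then calls
 *	vm_map_findspace() again from there, repeating until an address is
 *	found that is both aligned and still has "length" free bytes, or
 *	until the search fails (address wrap or beyond "max_addr").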
1495 */ 1496 static int 1497 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1498 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr, 1499 vm_offset_t alignment) 1500 { 1501 vm_offset_t aligned_addr, free_addr; 1502 1503 VM_MAP_ASSERT_LOCKED(map); 1504 free_addr = *addr; 1505 KASSERT(!vm_map_findspace(map, free_addr, length, addr) && 1506 free_addr == *addr, ("caller provided insufficient free space")); 1507 for (;;) { 1508 /* 1509 * At the start of every iteration, the free space at address 1510 * "*addr" is at least "length" bytes. 1511 */ 1512 if (alignment == 0) 1513 pmap_align_superpage(object, offset, addr, length); 1514 else if ((*addr & (alignment - 1)) != 0) { 1515 *addr &= ~(alignment - 1); 1516 *addr += alignment; 1517 } 1518 aligned_addr = *addr; 1519 if (aligned_addr == free_addr) { 1520 /* 1521 * Alignment did not change "*addr", so "*addr" must 1522 * still provide sufficient free space. 1523 */ 1524 return (KERN_SUCCESS); 1525 } 1526 1527 /* 1528 * Test for address wrap on "*addr". A wrapped "*addr" could 1529 * be a valid address, in which case vm_map_findspace() cannot 1530 * be relied upon to fail. 1531 */ 1532 if (aligned_addr < free_addr || 1533 vm_map_findspace(map, aligned_addr, length, addr) || 1534 (max_addr != 0 && *addr + length > max_addr)) 1535 return (KERN_NO_SPACE); 1536 free_addr = *addr; 1537 if (free_addr == aligned_addr) { 1538 /* 1539 * If a successful call to vm_map_findspace() did not 1540 * change "*addr", then "*addr" must still be aligned 1541 * and provide sufficient free space. 1542 */ 1543 return (KERN_SUCCESS); 1544 } 1545 } 1546 } 1547 1548 /* 1549 * vm_map_find finds an unallocated region in the target address 1550 * map with the given length. The search is defined to be 1551 * first-fit from the specified address; the region found is 1552 * returned in the same parameter. 1553 * 1554 * If object is non-NULL, ref count must be bumped by caller 1555 * prior to making call to account for the new entry. 
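 *
 *	The "find_space" argument selects the search policy: VMFS_NO_SPACE
 *	maps at *addr exactly, while VMFS_ANY_SPACE, VMFS_OPTIMAL_SPACE and
 *	VMFS_SUPER_SPACE search forward from *addr.  Values with bits set
 *	above the low byte request an explicit alignment, which the code
 *	below recovers as 1 << (find_space >> 8).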
1556 */ 1557 int 1558 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1559 vm_offset_t *addr, /* IN/OUT */ 1560 vm_size_t length, vm_offset_t max_addr, int find_space, 1561 vm_prot_t prot, vm_prot_t max, int cow) 1562 { 1563 vm_offset_t alignment, min_addr; 1564 int rv; 1565 1566 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 || 1567 object == NULL, 1568 ("vm_map_find: non-NULL backing object for stack")); 1569 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL || 1570 (object->flags & OBJ_COLORED) == 0)) 1571 find_space = VMFS_ANY_SPACE; 1572 if (find_space >> 8 != 0) { 1573 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags")); 1574 alignment = (vm_offset_t)1 << (find_space >> 8); 1575 } else 1576 alignment = 0; 1577 vm_map_lock(map); 1578 if (find_space != VMFS_NO_SPACE) { 1579 KASSERT(find_space == VMFS_ANY_SPACE || 1580 find_space == VMFS_OPTIMAL_SPACE || 1581 find_space == VMFS_SUPER_SPACE || 1582 alignment != 0, ("unexpected VMFS flag")); 1583 min_addr = *addr; 1584 again: 1585 if (vm_map_findspace(map, min_addr, length, addr) || 1586 (max_addr != 0 && *addr + length > max_addr)) { 1587 rv = KERN_NO_SPACE; 1588 goto done; 1589 } 1590 if (find_space != VMFS_ANY_SPACE && 1591 (rv = vm_map_alignspace(map, object, offset, addr, length, 1592 max_addr, alignment)) != KERN_SUCCESS) { 1593 if (find_space == VMFS_OPTIMAL_SPACE) { 1594 find_space = VMFS_ANY_SPACE; 1595 goto again; 1596 } 1597 goto done; 1598 } 1599 } 1600 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1601 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 1602 max, cow); 1603 } else { 1604 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 1605 prot, max, cow); 1606 } 1607 done: 1608 vm_map_unlock(map); 1609 return (rv); 1610 } 1611 1612 /* 1613 * vm_map_find_min() is a variant of vm_map_find() that takes an 1614 * additional parameter (min_addr) and treats the given address 1615 * (*addr) differently. Specifically, it treats *addr as a hint 1616 * and not as the minimum address where the mapping is created. 1617 * 1618 * This function works in two phases. First, it tries to 1619 * allocate above the hint. If that fails and the hint is 1620 * greater than min_addr, it performs a second pass, replacing 1621 * the hint with min_addr as the minimum address for the 1622 * allocation. 1623 */ 1624 int 1625 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1626 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr, 1627 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 1628 int cow) 1629 { 1630 vm_offset_t hint; 1631 int rv; 1632 1633 hint = *addr; 1634 for (;;) { 1635 rv = vm_map_find(map, object, offset, addr, length, max_addr, 1636 find_space, prot, max, cow); 1637 if (rv == KERN_SUCCESS || min_addr >= hint) 1638 return (rv); 1639 *addr = hint = min_addr; 1640 } 1641 } 1642 1643 /* 1644 * vm_map_simplify_entry: 1645 * 1646 * Simplify the given map entry by merging with either neighbor. This 1647 * routine also has the ability to merge with both neighbors. 1648 * 1649 * The map must be locked. 1650 * 1651 * This routine guarantees that the passed entry remains valid (though 1652 * possibly extended). When merging, this routine may delete one or 1653 * both neighbors. 
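 *
 *	Two neighboring entries are merged only when they are virtually
 *	contiguous, reference the same object at contiguous offsets (or no
 *	object at all), and agree on eflags, protection, max_protection,
 *	inheritance, wired_count and cred, as checked below.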
1654 */ 1655 void 1656 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1657 { 1658 vm_map_entry_t next, prev; 1659 vm_size_t prevsize, esize; 1660 1661 if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | 1662 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0) 1663 return; 1664 1665 prev = entry->prev; 1666 if (prev != &map->header) { 1667 prevsize = prev->end - prev->start; 1668 if ( (prev->end == entry->start) && 1669 (prev->object.vm_object == entry->object.vm_object) && 1670 (!prev->object.vm_object || 1671 (prev->offset + prevsize == entry->offset)) && 1672 (prev->eflags == entry->eflags) && 1673 (prev->protection == entry->protection) && 1674 (prev->max_protection == entry->max_protection) && 1675 (prev->inheritance == entry->inheritance) && 1676 (prev->wired_count == entry->wired_count) && 1677 (prev->cred == entry->cred)) { 1678 vm_map_entry_unlink(map, prev); 1679 entry->start = prev->start; 1680 entry->offset = prev->offset; 1681 if (entry->prev != &map->header) 1682 vm_map_entry_resize_free(map, entry->prev); 1683 1684 /* 1685 * If the backing object is a vnode object, 1686 * vm_object_deallocate() calls vrele(). 1687 * However, vrele() does not lock the vnode 1688 * because the vnode has additional 1689 * references. Thus, the map lock can be kept 1690 * without causing a lock-order reversal with 1691 * the vnode lock. 1692 * 1693 * Since we count the number of virtual page 1694 * mappings in object->un_pager.vnp.writemappings, 1695 * the writemappings value should not be adjusted 1696 * when the entry is disposed of. 1697 */ 1698 if (prev->object.vm_object) 1699 vm_object_deallocate(prev->object.vm_object); 1700 if (prev->cred != NULL) 1701 crfree(prev->cred); 1702 vm_map_entry_dispose(map, prev); 1703 } 1704 } 1705 1706 next = entry->next; 1707 if (next != &map->header) { 1708 esize = entry->end - entry->start; 1709 if ((entry->end == next->start) && 1710 (next->object.vm_object == entry->object.vm_object) && 1711 (!entry->object.vm_object || 1712 (entry->offset + esize == next->offset)) && 1713 (next->eflags == entry->eflags) && 1714 (next->protection == entry->protection) && 1715 (next->max_protection == entry->max_protection) && 1716 (next->inheritance == entry->inheritance) && 1717 (next->wired_count == entry->wired_count) && 1718 (next->cred == entry->cred)) { 1719 vm_map_entry_unlink(map, next); 1720 entry->end = next->end; 1721 vm_map_entry_resize_free(map, entry); 1722 1723 /* 1724 * See comment above. 1725 */ 1726 if (next->object.vm_object) 1727 vm_object_deallocate(next->object.vm_object); 1728 if (next->cred != NULL) 1729 crfree(next->cred); 1730 vm_map_entry_dispose(map, next); 1731 } 1732 } 1733 } 1734 /* 1735 * vm_map_clip_start: [ internal use only ] 1736 * 1737 * Asserts that the given entry begins at or after 1738 * the specified address; if necessary, 1739 * it splits the entry into two. 1740 */ 1741 #define vm_map_clip_start(map, entry, startaddr) \ 1742 { \ 1743 if (startaddr > entry->start) \ 1744 _vm_map_clip_start(map, entry, startaddr); \ 1745 } 1746 1747 /* 1748 * This routine is called only when it is known that 1749 * the entry must be split. 
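 *
 *	For example, clipping an entry that spans [A, B) at address S
 *	(A < S < B) leaves the original entry describing [S, B) and links
 *	a new entry for [A, S) in front of it; entry->offset is advanced
 *	by (S - A) so both halves keep mapping the same object pages.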
1750 */ 1751 static void 1752 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1753 { 1754 vm_map_entry_t new_entry; 1755 1756 VM_MAP_ASSERT_LOCKED(map); 1757 KASSERT(entry->end > start && entry->start < start, 1758 ("_vm_map_clip_start: invalid clip of entry %p", entry)); 1759 1760 /* 1761 * Split off the front portion -- note that we must insert the new 1762 * entry BEFORE this one, so that this entry has the specified 1763 * starting address. 1764 */ 1765 vm_map_simplify_entry(map, entry); 1766 1767 /* 1768 * If there is no object backing this entry, we might as well create 1769 * one now. If we defer it, an object can get created after the map 1770 * is clipped, and individual objects will be created for the split-up 1771 * map. This is a bit of a hack, but is also about the best place to 1772 * put this improvement. 1773 */ 1774 if (entry->object.vm_object == NULL && !map->system_map && 1775 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1776 vm_object_t object; 1777 object = vm_object_allocate(OBJT_DEFAULT, 1778 atop(entry->end - entry->start)); 1779 entry->object.vm_object = object; 1780 entry->offset = 0; 1781 if (entry->cred != NULL) { 1782 object->cred = entry->cred; 1783 object->charge = entry->end - entry->start; 1784 entry->cred = NULL; 1785 } 1786 } else if (entry->object.vm_object != NULL && 1787 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1788 entry->cred != NULL) { 1789 VM_OBJECT_WLOCK(entry->object.vm_object); 1790 KASSERT(entry->object.vm_object->cred == NULL, 1791 ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); 1792 entry->object.vm_object->cred = entry->cred; 1793 entry->object.vm_object->charge = entry->end - entry->start; 1794 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1795 entry->cred = NULL; 1796 } 1797 1798 new_entry = vm_map_entry_create(map); 1799 *new_entry = *entry; 1800 1801 new_entry->end = start; 1802 entry->offset += (start - entry->start); 1803 entry->start = start; 1804 if (new_entry->cred != NULL) 1805 crhold(entry->cred); 1806 1807 vm_map_entry_link(map, entry->prev, new_entry); 1808 1809 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1810 vm_object_reference(new_entry->object.vm_object); 1811 /* 1812 * The object->un_pager.vnp.writemappings for the 1813 * object of MAP_ENTRY_VN_WRITECNT type entry shall be 1814 * kept as is here. The virtual pages are 1815 * re-distributed among the clipped entries, so the sum is 1816 * left the same. 1817 */ 1818 } 1819 } 1820 1821 /* 1822 * vm_map_clip_end: [ internal use only ] 1823 * 1824 * Asserts that the given entry ends at or before 1825 * the specified address; if necessary, 1826 * it splits the entry into two. 1827 */ 1828 #define vm_map_clip_end(map, entry, endaddr) \ 1829 { \ 1830 if ((endaddr) < (entry->end)) \ 1831 _vm_map_clip_end((map), (entry), (endaddr)); \ 1832 } 1833 1834 /* 1835 * This routine is called only when it is known that 1836 * the entry must be split. 1837 */ 1838 static void 1839 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1840 { 1841 vm_map_entry_t new_entry; 1842 1843 VM_MAP_ASSERT_LOCKED(map); 1844 KASSERT(entry->start < end && entry->end > end, 1845 ("_vm_map_clip_end: invalid clip of entry %p", entry)); 1846 1847 /* 1848 * If there is no object backing this entry, we might as well create 1849 * one now. If we defer it, an object can get created after the map 1850 * is clipped, and individual objects will be created for the split-up 1851 * map. 
This is a bit of a hack, but is also about the best place to 1852 * put this improvement. 1853 */ 1854 if (entry->object.vm_object == NULL && !map->system_map && 1855 (entry->eflags & MAP_ENTRY_GUARD) == 0) { 1856 vm_object_t object; 1857 object = vm_object_allocate(OBJT_DEFAULT, 1858 atop(entry->end - entry->start)); 1859 entry->object.vm_object = object; 1860 entry->offset = 0; 1861 if (entry->cred != NULL) { 1862 object->cred = entry->cred; 1863 object->charge = entry->end - entry->start; 1864 entry->cred = NULL; 1865 } 1866 } else if (entry->object.vm_object != NULL && 1867 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 1868 entry->cred != NULL) { 1869 VM_OBJECT_WLOCK(entry->object.vm_object); 1870 KASSERT(entry->object.vm_object->cred == NULL, 1871 ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); 1872 entry->object.vm_object->cred = entry->cred; 1873 entry->object.vm_object->charge = entry->end - entry->start; 1874 VM_OBJECT_WUNLOCK(entry->object.vm_object); 1875 entry->cred = NULL; 1876 } 1877 1878 /* 1879 * Create a new entry and insert it AFTER the specified entry 1880 */ 1881 new_entry = vm_map_entry_create(map); 1882 *new_entry = *entry; 1883 1884 new_entry->start = entry->end = end; 1885 new_entry->offset += (end - entry->start); 1886 if (new_entry->cred != NULL) 1887 crhold(entry->cred); 1888 1889 vm_map_entry_link(map, entry, new_entry); 1890 1891 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1892 vm_object_reference(new_entry->object.vm_object); 1893 } 1894 } 1895 1896 /* 1897 * vm_map_submap: [ kernel use only ] 1898 * 1899 * Mark the given range as handled by a subordinate map. 1900 * 1901 * This range must have been created with vm_map_find, 1902 * and no other operations may have been performed on this 1903 * range prior to calling vm_map_submap. 1904 * 1905 * Only a limited number of operations can be performed 1906 * within this range after calling vm_map_submap: 1907 * vm_fault 1908 * [Don't try vm_map_copy!] 1909 * 1910 * To remove a submapping, one must first remove the 1911 * range from the superior map, and then destroy the 1912 * submap (if desired). [Better yet, don't try it.] 1913 */ 1914 int 1915 vm_map_submap( 1916 vm_map_t map, 1917 vm_offset_t start, 1918 vm_offset_t end, 1919 vm_map_t submap) 1920 { 1921 vm_map_entry_t entry; 1922 int result = KERN_INVALID_ARGUMENT; 1923 1924 vm_map_lock(map); 1925 1926 VM_MAP_RANGE_CHECK(map, start, end); 1927 1928 if (vm_map_lookup_entry(map, start, &entry)) { 1929 vm_map_clip_start(map, entry, start); 1930 } else 1931 entry = entry->next; 1932 1933 vm_map_clip_end(map, entry, end); 1934 1935 if ((entry->start == start) && (entry->end == end) && 1936 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1937 (entry->object.vm_object == NULL)) { 1938 entry->object.sub_map = submap; 1939 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1940 result = KERN_SUCCESS; 1941 } 1942 vm_map_unlock(map); 1943 1944 return (result); 1945 } 1946 1947 /* 1948 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 1949 */ 1950 #define MAX_INIT_PT 96 1951 1952 /* 1953 * vm_map_pmap_enter: 1954 * 1955 * Preload the specified map's pmap with mappings to the specified 1956 * object's memory-resident pages. No further physical pages are 1957 * allocated, and no further virtual pages are retrieved from secondary 1958 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 1959 * limited number of page mappings are created at the low-end of the 1960 * specified address range.
(For this purpose, a superpage mapping 1961 * counts as one page mapping.) Otherwise, all resident pages within 1962 * the specified address range are mapped. 1963 */ 1964 static void 1965 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1966 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1967 { 1968 vm_offset_t start; 1969 vm_page_t p, p_start; 1970 vm_pindex_t mask, psize, threshold, tmpidx; 1971 1972 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1973 return; 1974 VM_OBJECT_RLOCK(object); 1975 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1976 VM_OBJECT_RUNLOCK(object); 1977 VM_OBJECT_WLOCK(object); 1978 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1979 pmap_object_init_pt(map->pmap, addr, object, pindex, 1980 size); 1981 VM_OBJECT_WUNLOCK(object); 1982 return; 1983 } 1984 VM_OBJECT_LOCK_DOWNGRADE(object); 1985 } 1986 1987 psize = atop(size); 1988 if (psize + pindex > object->size) { 1989 if (object->size < pindex) { 1990 VM_OBJECT_RUNLOCK(object); 1991 return; 1992 } 1993 psize = object->size - pindex; 1994 } 1995 1996 start = 0; 1997 p_start = NULL; 1998 threshold = MAX_INIT_PT; 1999 2000 p = vm_page_find_least(object, pindex); 2001 /* 2002 * Assert: the variable p is either (1) the page with the 2003 * least pindex greater than or equal to the parameter pindex 2004 * or (2) NULL. 2005 */ 2006 for (; 2007 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2008 p = TAILQ_NEXT(p, listq)) { 2009 /* 2010 * don't allow an madvise to blow away our really 2011 * free pages allocating pv entries. 2012 */ 2013 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2014 vm_page_count_severe()) || 2015 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2016 tmpidx >= threshold)) { 2017 psize = tmpidx; 2018 break; 2019 } 2020 if (p->valid == VM_PAGE_BITS_ALL) { 2021 if (p_start == NULL) { 2022 start = addr + ptoa(tmpidx); 2023 p_start = p; 2024 } 2025 /* Jump ahead if a superpage mapping is possible. */ 2026 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2027 (pagesizes[p->psind] - 1)) == 0) { 2028 mask = atop(pagesizes[p->psind]) - 1; 2029 if (tmpidx + mask < psize && 2030 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2031 p += mask; 2032 threshold += mask; 2033 } 2034 } 2035 } else if (p_start != NULL) { 2036 pmap_enter_object(map->pmap, start, addr + 2037 ptoa(tmpidx), p_start, prot); 2038 p_start = NULL; 2039 } 2040 } 2041 if (p_start != NULL) 2042 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2043 p_start, prot); 2044 VM_OBJECT_RUNLOCK(object); 2045 } 2046 2047 /* 2048 * vm_map_protect: 2049 * 2050 * Sets the protection of the specified address 2051 * region in the target map. If "set_max" is 2052 * specified, the maximum protection is to be set; 2053 * otherwise, only the current protection is affected. 2054 */ 2055 int 2056 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2057 vm_prot_t new_prot, boolean_t set_max) 2058 { 2059 vm_map_entry_t current, entry; 2060 vm_object_t obj; 2061 struct ucred *cred; 2062 vm_prot_t old_prot; 2063 2064 if (start == end) 2065 return (KERN_SUCCESS); 2066 2067 vm_map_lock(map); 2068 2069 /* 2070 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2071 * need to fault pages into the map and will drop the map lock while 2072 * doing so, and the VM object may end up in an inconsistent state if we 2073 * update the protection on the map entry in between faults. 
2074 */ 2075 vm_map_wait_busy(map); 2076 2077 VM_MAP_RANGE_CHECK(map, start, end); 2078 2079 if (vm_map_lookup_entry(map, start, &entry)) { 2080 vm_map_clip_start(map, entry, start); 2081 } else { 2082 entry = entry->next; 2083 } 2084 2085 /* 2086 * Make a first pass to check for protection violations. 2087 */ 2088 for (current = entry; current->start < end; current = current->next) { 2089 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2090 continue; 2091 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2092 vm_map_unlock(map); 2093 return (KERN_INVALID_ARGUMENT); 2094 } 2095 if ((new_prot & current->max_protection) != new_prot) { 2096 vm_map_unlock(map); 2097 return (KERN_PROTECTION_FAILURE); 2098 } 2099 } 2100 2101 /* 2102 * Do an accounting pass for private read-only mappings that 2103 * now will do cow due to allowed write (e.g. debugger sets 2104 * breakpoint on text segment) 2105 */ 2106 for (current = entry; current->start < end; current = current->next) { 2107 2108 vm_map_clip_end(map, current, end); 2109 2110 if (set_max || 2111 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2112 ENTRY_CHARGED(current) || 2113 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2114 continue; 2115 } 2116 2117 cred = curthread->td_ucred; 2118 obj = current->object.vm_object; 2119 2120 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2121 if (!swap_reserve(current->end - current->start)) { 2122 vm_map_unlock(map); 2123 return (KERN_RESOURCE_SHORTAGE); 2124 } 2125 crhold(cred); 2126 current->cred = cred; 2127 continue; 2128 } 2129 2130 VM_OBJECT_WLOCK(obj); 2131 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2132 VM_OBJECT_WUNLOCK(obj); 2133 continue; 2134 } 2135 2136 /* 2137 * Charge for the whole object allocation now, since 2138 * we cannot distinguish between non-charged and 2139 * charged clipped mapping of the same object later. 2140 */ 2141 KASSERT(obj->charge == 0, 2142 ("vm_map_protect: object %p overcharged (entry %p)", 2143 obj, current)); 2144 if (!swap_reserve(ptoa(obj->size))) { 2145 VM_OBJECT_WUNLOCK(obj); 2146 vm_map_unlock(map); 2147 return (KERN_RESOURCE_SHORTAGE); 2148 } 2149 2150 crhold(cred); 2151 obj->cred = cred; 2152 obj->charge = ptoa(obj->size); 2153 VM_OBJECT_WUNLOCK(obj); 2154 } 2155 2156 /* 2157 * Go back and fix up protections. [Note that clipping is not 2158 * necessary the second time.] 2159 */ 2160 for (current = entry; current->start < end; current = current->next) { 2161 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2162 continue; 2163 2164 old_prot = current->protection; 2165 2166 if (set_max) 2167 current->protection = 2168 (current->max_protection = new_prot) & 2169 old_prot; 2170 else 2171 current->protection = new_prot; 2172 2173 /* 2174 * For user wired map entries, the normal lazy evaluation of 2175 * write access upgrades through soft page faults is 2176 * undesirable. Instead, immediately copy any pages that are 2177 * copy-on-write and enable write access in the physical map. 2178 */ 2179 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2180 (current->protection & VM_PROT_WRITE) != 0 && 2181 (old_prot & VM_PROT_WRITE) == 0) 2182 vm_fault_copy_entry(map, map, current, current, NULL); 2183 2184 /* 2185 * When restricting access, update the physical map. Worry 2186 * about copy-on-write here. 2187 */ 2188 if ((old_prot & ~current->protection) != 0) { 2189 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? 
~VM_PROT_WRITE : \ 2190 VM_PROT_ALL) 2191 pmap_protect(map->pmap, current->start, 2192 current->end, 2193 current->protection & MASK(current)); 2194 #undef MASK 2195 } 2196 vm_map_simplify_entry(map, current); 2197 } 2198 vm_map_unlock(map); 2199 return (KERN_SUCCESS); 2200 } 2201 2202 /* 2203 * vm_map_madvise: 2204 * 2205 * This routine traverses a process's map handling the madvise 2206 * system call. Advisories are classified as either those affecting 2207 * the vm_map_entry structure, or those affecting the underlying 2208 * objects. 2209 */ 2210 int 2211 vm_map_madvise( 2212 vm_map_t map, 2213 vm_offset_t start, 2214 vm_offset_t end, 2215 int behav) 2216 { 2217 vm_map_entry_t current, entry; 2218 int modify_map = 0; 2219 2220 /* 2221 * Some madvise calls directly modify the vm_map_entry, in which case 2222 * we need to use an exclusive lock on the map and we need to perform 2223 * various clipping operations. Otherwise we only need a read-lock 2224 * on the map. 2225 */ 2226 switch(behav) { 2227 case MADV_NORMAL: 2228 case MADV_SEQUENTIAL: 2229 case MADV_RANDOM: 2230 case MADV_NOSYNC: 2231 case MADV_AUTOSYNC: 2232 case MADV_NOCORE: 2233 case MADV_CORE: 2234 if (start == end) 2235 return (KERN_SUCCESS); 2236 modify_map = 1; 2237 vm_map_lock(map); 2238 break; 2239 case MADV_WILLNEED: 2240 case MADV_DONTNEED: 2241 case MADV_FREE: 2242 if (start == end) 2243 return (KERN_SUCCESS); 2244 vm_map_lock_read(map); 2245 break; 2246 default: 2247 return (KERN_INVALID_ARGUMENT); 2248 } 2249 2250 /* 2251 * Locate starting entry and clip if necessary. 2252 */ 2253 VM_MAP_RANGE_CHECK(map, start, end); 2254 2255 if (vm_map_lookup_entry(map, start, &entry)) { 2256 if (modify_map) 2257 vm_map_clip_start(map, entry, start); 2258 } else { 2259 entry = entry->next; 2260 } 2261 2262 if (modify_map) { 2263 /* 2264 * madvise behaviors that are implemented in the vm_map_entry. 2265 * 2266 * We clip the vm_map_entry so that behavioral changes are 2267 * limited to the specified address range. 2268 */ 2269 for (current = entry; current->start < end; 2270 current = current->next) { 2271 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2272 continue; 2273 2274 vm_map_clip_end(map, current, end); 2275 2276 switch (behav) { 2277 case MADV_NORMAL: 2278 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2279 break; 2280 case MADV_SEQUENTIAL: 2281 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2282 break; 2283 case MADV_RANDOM: 2284 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2285 break; 2286 case MADV_NOSYNC: 2287 current->eflags |= MAP_ENTRY_NOSYNC; 2288 break; 2289 case MADV_AUTOSYNC: 2290 current->eflags &= ~MAP_ENTRY_NOSYNC; 2291 break; 2292 case MADV_NOCORE: 2293 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2294 break; 2295 case MADV_CORE: 2296 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2297 break; 2298 default: 2299 break; 2300 } 2301 vm_map_simplify_entry(map, current); 2302 } 2303 vm_map_unlock(map); 2304 } else { 2305 vm_pindex_t pstart, pend; 2306 2307 /* 2308 * madvise behaviors that are implemented in the underlying 2309 * vm_object. 2310 * 2311 * Since we don't clip the vm_map_entry, we have to clip 2312 * the vm_object pindex and count.
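 *
 * Worked example (illustrative only, assuming 4KB pages): for an
 * entry spanning [0x10000, 0x20000) with offset 0, advising the
 * range [0x14000, 0x18000) below yields pstart = 4 and pend = 8.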
2313 */ 2314 for (current = entry; current->start < end; 2315 current = current->next) { 2316 vm_offset_t useEnd, useStart; 2317 2318 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2319 continue; 2320 2321 pstart = OFF_TO_IDX(current->offset); 2322 pend = pstart + atop(current->end - current->start); 2323 useStart = current->start; 2324 useEnd = current->end; 2325 2326 if (current->start < start) { 2327 pstart += atop(start - current->start); 2328 useStart = start; 2329 } 2330 if (current->end > end) { 2331 pend -= atop(current->end - end); 2332 useEnd = end; 2333 } 2334 2335 if (pstart >= pend) 2336 continue; 2337 2338 /* 2339 * Perform the pmap_advise() before clearing 2340 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2341 * concurrent pmap operation, such as pmap_remove(), 2342 * could clear a reference in the pmap and set 2343 * PGA_REFERENCED on the page before the pmap_advise() 2344 * had completed. Consequently, the page would appear 2345 * referenced based upon an old reference that 2346 * occurred before this pmap_advise() ran. 2347 */ 2348 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2349 pmap_advise(map->pmap, useStart, useEnd, 2350 behav); 2351 2352 vm_object_madvise(current->object.vm_object, pstart, 2353 pend, behav); 2354 2355 /* 2356 * Pre-populate paging structures in the 2357 * WILLNEED case. For wired entries, the 2358 * paging structures are already populated. 2359 */ 2360 if (behav == MADV_WILLNEED && 2361 current->wired_count == 0) { 2362 vm_map_pmap_enter(map, 2363 useStart, 2364 current->protection, 2365 current->object.vm_object, 2366 pstart, 2367 ptoa(pend - pstart), 2368 MAP_PREFAULT_MADVISE 2369 ); 2370 } 2371 } 2372 vm_map_unlock_read(map); 2373 } 2374 return (0); 2375 } 2376 2377 2378 /* 2379 * vm_map_inherit: 2380 * 2381 * Sets the inheritance of the specified address 2382 * range in the target map. Inheritance 2383 * affects how the map will be shared with 2384 * child maps at the time of vmspace_fork. 2385 */ 2386 int 2387 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2388 vm_inherit_t new_inheritance) 2389 { 2390 vm_map_entry_t entry; 2391 vm_map_entry_t temp_entry; 2392 2393 switch (new_inheritance) { 2394 case VM_INHERIT_NONE: 2395 case VM_INHERIT_COPY: 2396 case VM_INHERIT_SHARE: 2397 case VM_INHERIT_ZERO: 2398 break; 2399 default: 2400 return (KERN_INVALID_ARGUMENT); 2401 } 2402 if (start == end) 2403 return (KERN_SUCCESS); 2404 vm_map_lock(map); 2405 VM_MAP_RANGE_CHECK(map, start, end); 2406 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2407 entry = temp_entry; 2408 vm_map_clip_start(map, entry, start); 2409 } else 2410 entry = temp_entry->next; 2411 while (entry->start < end) { 2412 vm_map_clip_end(map, entry, end); 2413 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2414 new_inheritance != VM_INHERIT_ZERO) 2415 entry->inheritance = new_inheritance; 2416 vm_map_simplify_entry(map, entry); 2417 entry = entry->next; 2418 } 2419 vm_map_unlock(map); 2420 return (KERN_SUCCESS); 2421 } 2422 2423 /* 2424 * vm_map_unwire: 2425 * 2426 * Implements both kernel and user unwiring. 2427 */ 2428 int 2429 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2430 int flags) 2431 { 2432 vm_map_entry_t entry, first_entry, tmp_entry; 2433 vm_offset_t saved_start; 2434 unsigned int last_timestamp; 2435 int rv; 2436 boolean_t need_wakeup, result, user_unwire; 2437 2438 if (start == end) 2439 return (KERN_SUCCESS); 2440 user_unwire = (flags & VM_MAP_WIRE_USER) ? 
TRUE : FALSE; 2441 vm_map_lock(map); 2442 VM_MAP_RANGE_CHECK(map, start, end); 2443 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2444 if (flags & VM_MAP_WIRE_HOLESOK) 2445 first_entry = first_entry->next; 2446 else { 2447 vm_map_unlock(map); 2448 return (KERN_INVALID_ADDRESS); 2449 } 2450 } 2451 last_timestamp = map->timestamp; 2452 entry = first_entry; 2453 while (entry->start < end) { 2454 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2455 /* 2456 * We have not yet clipped the entry. 2457 */ 2458 saved_start = (start >= entry->start) ? start : 2459 entry->start; 2460 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2461 if (vm_map_unlock_and_wait(map, 0)) { 2462 /* 2463 * Allow interruption of user unwiring? 2464 */ 2465 } 2466 vm_map_lock(map); 2467 if (last_timestamp+1 != map->timestamp) { 2468 /* 2469 * Look again for the entry because the map was 2470 * modified while it was unlocked. 2471 * Specifically, the entry may have been 2472 * clipped, merged, or deleted. 2473 */ 2474 if (!vm_map_lookup_entry(map, saved_start, 2475 &tmp_entry)) { 2476 if (flags & VM_MAP_WIRE_HOLESOK) 2477 tmp_entry = tmp_entry->next; 2478 else { 2479 if (saved_start == start) { 2480 /* 2481 * First_entry has been deleted. 2482 */ 2483 vm_map_unlock(map); 2484 return (KERN_INVALID_ADDRESS); 2485 } 2486 end = saved_start; 2487 rv = KERN_INVALID_ADDRESS; 2488 goto done; 2489 } 2490 } 2491 if (entry == first_entry) 2492 first_entry = tmp_entry; 2493 else 2494 first_entry = NULL; 2495 entry = tmp_entry; 2496 } 2497 last_timestamp = map->timestamp; 2498 continue; 2499 } 2500 vm_map_clip_start(map, entry, start); 2501 vm_map_clip_end(map, entry, end); 2502 /* 2503 * Mark the entry in case the map lock is released. (See 2504 * above.) 2505 */ 2506 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2507 entry->wiring_thread == NULL, 2508 ("owned map entry %p", entry)); 2509 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2510 entry->wiring_thread = curthread; 2511 /* 2512 * Check the map for holes in the specified region. 2513 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2514 */ 2515 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2516 (entry->end < end && entry->next->start > entry->end)) { 2517 end = entry->end; 2518 rv = KERN_INVALID_ADDRESS; 2519 goto done; 2520 } 2521 /* 2522 * If system unwiring, require that the entry is system wired. 2523 */ 2524 if (!user_unwire && 2525 vm_map_entry_system_wired_count(entry) == 0) { 2526 end = entry->end; 2527 rv = KERN_INVALID_ARGUMENT; 2528 goto done; 2529 } 2530 entry = entry->next; 2531 } 2532 rv = KERN_SUCCESS; 2533 done: 2534 need_wakeup = FALSE; 2535 if (first_entry == NULL) { 2536 result = vm_map_lookup_entry(map, start, &first_entry); 2537 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2538 first_entry = first_entry->next; 2539 else 2540 KASSERT(result, ("vm_map_unwire: lookup failed")); 2541 } 2542 for (entry = first_entry; entry->start < end; entry = entry->next) { 2543 /* 2544 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2545 * space in the unwired region could have been mapped 2546 * while the map lock was dropped for draining 2547 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2548 * could be simultaneously wiring this new mapping 2549 * entry. Detect these cases and skip any entries 2550 * marked as in transition by us. 
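 *
 * (Concretely, the check below passes over any entry whose
 * MAP_ENTRY_IN_TRANSITION flag is clear or whose wiring_thread is
 * not curthread.)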
2551 */ 2552 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2553 entry->wiring_thread != curthread) { 2554 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2555 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2556 continue; 2557 } 2558 2559 if (rv == KERN_SUCCESS && (!user_unwire || 2560 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2561 if (user_unwire) 2562 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2563 if (entry->wired_count == 1) 2564 vm_map_entry_unwire(map, entry); 2565 else 2566 entry->wired_count--; 2567 } 2568 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2569 ("vm_map_unwire: in-transition flag missing %p", entry)); 2570 KASSERT(entry->wiring_thread == curthread, 2571 ("vm_map_unwire: alien wire %p", entry)); 2572 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2573 entry->wiring_thread = NULL; 2574 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2575 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2576 need_wakeup = TRUE; 2577 } 2578 vm_map_simplify_entry(map, entry); 2579 } 2580 vm_map_unlock(map); 2581 if (need_wakeup) 2582 vm_map_wakeup(map); 2583 return (rv); 2584 } 2585 2586 /* 2587 * vm_map_wire_entry_failure: 2588 * 2589 * Handle a wiring failure on the given entry. 2590 * 2591 * The map should be locked. 2592 */ 2593 static void 2594 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2595 vm_offset_t failed_addr) 2596 { 2597 2598 VM_MAP_ASSERT_LOCKED(map); 2599 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2600 entry->wired_count == 1, 2601 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2602 KASSERT(failed_addr < entry->end, 2603 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2604 2605 /* 2606 * If any pages at the start of this entry were successfully wired, 2607 * then unwire them. 2608 */ 2609 if (failed_addr > entry->start) { 2610 pmap_unwire(map->pmap, entry->start, failed_addr); 2611 vm_object_unwire(entry->object.vm_object, entry->offset, 2612 failed_addr - entry->start, PQ_ACTIVE); 2613 } 2614 2615 /* 2616 * Assign an out-of-range value to represent the failure to wire this 2617 * entry. 2618 */ 2619 entry->wired_count = -1; 2620 } 2621 2622 /* 2623 * vm_map_wire: 2624 * 2625 * Implements both kernel and user wiring. 2626 */ 2627 int 2628 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2629 int flags) 2630 { 2631 vm_map_entry_t entry, first_entry, tmp_entry; 2632 vm_offset_t faddr, saved_end, saved_start; 2633 unsigned int last_timestamp; 2634 int rv; 2635 boolean_t need_wakeup, result, user_wire; 2636 vm_prot_t prot; 2637 2638 if (start == end) 2639 return (KERN_SUCCESS); 2640 prot = 0; 2641 if (flags & VM_MAP_WIRE_WRITE) 2642 prot |= VM_PROT_WRITE; 2643 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2644 vm_map_lock(map); 2645 VM_MAP_RANGE_CHECK(map, start, end); 2646 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2647 if (flags & VM_MAP_WIRE_HOLESOK) 2648 first_entry = first_entry->next; 2649 else { 2650 vm_map_unlock(map); 2651 return (KERN_INVALID_ADDRESS); 2652 } 2653 } 2654 last_timestamp = map->timestamp; 2655 entry = first_entry; 2656 while (entry->start < end) { 2657 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2658 /* 2659 * We have not yet clipped the entry. 2660 */ 2661 saved_start = (start >= entry->start) ? start : 2662 entry->start; 2663 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2664 if (vm_map_unlock_and_wait(map, 0)) { 2665 /* 2666 * Allow interruption of user wiring? 
2667 */ 2668 } 2669 vm_map_lock(map); 2670 if (last_timestamp + 1 != map->timestamp) { 2671 /* 2672 * Look again for the entry because the map was 2673 * modified while it was unlocked. 2674 * Specifically, the entry may have been 2675 * clipped, merged, or deleted. 2676 */ 2677 if (!vm_map_lookup_entry(map, saved_start, 2678 &tmp_entry)) { 2679 if (flags & VM_MAP_WIRE_HOLESOK) 2680 tmp_entry = tmp_entry->next; 2681 else { 2682 if (saved_start == start) { 2683 /* 2684 * first_entry has been deleted. 2685 */ 2686 vm_map_unlock(map); 2687 return (KERN_INVALID_ADDRESS); 2688 } 2689 end = saved_start; 2690 rv = KERN_INVALID_ADDRESS; 2691 goto done; 2692 } 2693 } 2694 if (entry == first_entry) 2695 first_entry = tmp_entry; 2696 else 2697 first_entry = NULL; 2698 entry = tmp_entry; 2699 } 2700 last_timestamp = map->timestamp; 2701 continue; 2702 } 2703 vm_map_clip_start(map, entry, start); 2704 vm_map_clip_end(map, entry, end); 2705 /* 2706 * Mark the entry in case the map lock is released. (See 2707 * above.) 2708 */ 2709 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2710 entry->wiring_thread == NULL, 2711 ("owned map entry %p", entry)); 2712 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2713 entry->wiring_thread = curthread; 2714 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2715 || (entry->protection & prot) != prot) { 2716 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2717 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2718 end = entry->end; 2719 rv = KERN_INVALID_ADDRESS; 2720 goto done; 2721 } 2722 goto next_entry; 2723 } 2724 if (entry->wired_count == 0) { 2725 entry->wired_count++; 2726 saved_start = entry->start; 2727 saved_end = entry->end; 2728 2729 /* 2730 * Release the map lock, relying on the in-transition 2731 * mark. Mark the map busy for fork. 2732 */ 2733 vm_map_busy(map); 2734 vm_map_unlock(map); 2735 2736 faddr = saved_start; 2737 do { 2738 /* 2739 * Simulate a fault to get the page and enter 2740 * it into the physical map. 2741 */ 2742 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 2743 VM_FAULT_WIRE)) != KERN_SUCCESS) 2744 break; 2745 } while ((faddr += PAGE_SIZE) < saved_end); 2746 vm_map_lock(map); 2747 vm_map_unbusy(map); 2748 if (last_timestamp + 1 != map->timestamp) { 2749 /* 2750 * Look again for the entry because the map was 2751 * modified while it was unlocked. The entry 2752 * may have been clipped, but NOT merged or 2753 * deleted. 2754 */ 2755 result = vm_map_lookup_entry(map, saved_start, 2756 &tmp_entry); 2757 KASSERT(result, ("vm_map_wire: lookup failed")); 2758 if (entry == first_entry) 2759 first_entry = tmp_entry; 2760 else 2761 first_entry = NULL; 2762 entry = tmp_entry; 2763 while (entry->end < saved_end) { 2764 /* 2765 * In case of failure, handle entries 2766 * that were not fully wired here; 2767 * fully wired entries are handled 2768 * later. 2769 */ 2770 if (rv != KERN_SUCCESS && 2771 faddr < entry->end) 2772 vm_map_wire_entry_failure(map, 2773 entry, faddr); 2774 entry = entry->next; 2775 } 2776 } 2777 last_timestamp = map->timestamp; 2778 if (rv != KERN_SUCCESS) { 2779 vm_map_wire_entry_failure(map, entry, faddr); 2780 end = entry->end; 2781 goto done; 2782 } 2783 } else if (!user_wire || 2784 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2785 entry->wired_count++; 2786 } 2787 /* 2788 * Check the map for holes in the specified region. 2789 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
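 *
 * (A hole here is an unmapped gap inside the requested range, i.e.
 * entry->end < end while entry->next->start > entry->end.)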
2790 */ 2791 next_entry: 2792 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 2793 entry->end < end && entry->next->start > entry->end) { 2794 end = entry->end; 2795 rv = KERN_INVALID_ADDRESS; 2796 goto done; 2797 } 2798 entry = entry->next; 2799 } 2800 rv = KERN_SUCCESS; 2801 done: 2802 need_wakeup = FALSE; 2803 if (first_entry == NULL) { 2804 result = vm_map_lookup_entry(map, start, &first_entry); 2805 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2806 first_entry = first_entry->next; 2807 else 2808 KASSERT(result, ("vm_map_wire: lookup failed")); 2809 } 2810 for (entry = first_entry; entry->start < end; entry = entry->next) { 2811 /* 2812 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2813 * space in the unwired region could have been mapped 2814 * while the map lock was dropped for faulting in the 2815 * pages or draining MAP_ENTRY_IN_TRANSITION. 2816 * Moreover, another thread could be simultaneously 2817 * wiring this new mapping entry. Detect these cases 2818 * and skip any entries marked as in transition not by us. 2819 */ 2820 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2821 entry->wiring_thread != curthread) { 2822 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2823 ("vm_map_wire: !HOLESOK and new/changed entry")); 2824 continue; 2825 } 2826 2827 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2828 goto next_entry_done; 2829 2830 if (rv == KERN_SUCCESS) { 2831 if (user_wire) 2832 entry->eflags |= MAP_ENTRY_USER_WIRED; 2833 } else if (entry->wired_count == -1) { 2834 /* 2835 * Wiring failed on this entry. Thus, unwiring is 2836 * unnecessary. 2837 */ 2838 entry->wired_count = 0; 2839 } else if (!user_wire || 2840 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2841 /* 2842 * Undo the wiring. Wiring succeeded on this entry 2843 * but failed on a later entry. 2844 */ 2845 if (entry->wired_count == 1) 2846 vm_map_entry_unwire(map, entry); 2847 else 2848 entry->wired_count--; 2849 } 2850 next_entry_done: 2851 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2852 ("vm_map_wire: in-transition flag missing %p", entry)); 2853 KASSERT(entry->wiring_thread == curthread, 2854 ("vm_map_wire: alien wire %p", entry)); 2855 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2856 MAP_ENTRY_WIRE_SKIPPED); 2857 entry->wiring_thread = NULL; 2858 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2859 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2860 need_wakeup = TRUE; 2861 } 2862 vm_map_simplify_entry(map, entry); 2863 } 2864 vm_map_unlock(map); 2865 if (need_wakeup) 2866 vm_map_wakeup(map); 2867 return (rv); 2868 } 2869 2870 /* 2871 * vm_map_sync 2872 * 2873 * Push any dirty cached pages in the address range to their pager. 2874 * If syncio is TRUE, dirty pages are written synchronously. 2875 * If invalidate is TRUE, any cached pages are freed as well. 2876 * 2877 * If the size of the region from start to end is zero, we are 2878 * supposed to flush all modified pages within the region containing 2879 * start. Unfortunately, a region can be split or coalesced with 2880 * neighboring regions, making it difficult to determine what the 2881 * original region was. Therefore, we approximate this requirement by 2882 * flushing the current region containing start. 2883 * 2884 * Returns an error if any part of the specified range is not mapped. 
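 *
 * Illustrative sketch (an assumption about the caller, not taken
 * from this file): the msync(2) path is expected to invoke this
 * roughly as
 *
 *	vm_map_sync(map, addr, addr + size,
 *	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);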
2885 */ 2886 int 2887 vm_map_sync( 2888 vm_map_t map, 2889 vm_offset_t start, 2890 vm_offset_t end, 2891 boolean_t syncio, 2892 boolean_t invalidate) 2893 { 2894 vm_map_entry_t current; 2895 vm_map_entry_t entry; 2896 vm_size_t size; 2897 vm_object_t object; 2898 vm_ooffset_t offset; 2899 unsigned int last_timestamp; 2900 boolean_t failed; 2901 2902 vm_map_lock_read(map); 2903 VM_MAP_RANGE_CHECK(map, start, end); 2904 if (!vm_map_lookup_entry(map, start, &entry)) { 2905 vm_map_unlock_read(map); 2906 return (KERN_INVALID_ADDRESS); 2907 } else if (start == end) { 2908 start = entry->start; 2909 end = entry->end; 2910 } 2911 /* 2912 * Make a first pass to check for user-wired memory and holes. 2913 */ 2914 for (current = entry; current->start < end; current = current->next) { 2915 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2916 vm_map_unlock_read(map); 2917 return (KERN_INVALID_ARGUMENT); 2918 } 2919 if (end > current->end && 2920 current->end != current->next->start) { 2921 vm_map_unlock_read(map); 2922 return (KERN_INVALID_ADDRESS); 2923 } 2924 } 2925 2926 if (invalidate) 2927 pmap_remove(map->pmap, start, end); 2928 failed = FALSE; 2929 2930 /* 2931 * Make a second pass, cleaning/uncaching pages from the indicated 2932 * objects as we go. 2933 */ 2934 for (current = entry; current->start < end;) { 2935 offset = current->offset + (start - current->start); 2936 size = (end <= current->end ? end : current->end) - start; 2937 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2938 vm_map_t smap; 2939 vm_map_entry_t tentry; 2940 vm_size_t tsize; 2941 2942 smap = current->object.sub_map; 2943 vm_map_lock_read(smap); 2944 (void) vm_map_lookup_entry(smap, offset, &tentry); 2945 tsize = tentry->end - offset; 2946 if (tsize < size) 2947 size = tsize; 2948 object = tentry->object.vm_object; 2949 offset = tentry->offset + (offset - tentry->start); 2950 vm_map_unlock_read(smap); 2951 } else { 2952 object = current->object.vm_object; 2953 } 2954 vm_object_reference(object); 2955 last_timestamp = map->timestamp; 2956 vm_map_unlock_read(map); 2957 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2958 failed = TRUE; 2959 start += size; 2960 vm_object_deallocate(object); 2961 vm_map_lock_read(map); 2962 if (last_timestamp == map->timestamp || 2963 !vm_map_lookup_entry(map, start, &current)) 2964 current = current->next; 2965 } 2966 2967 vm_map_unlock_read(map); 2968 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2969 } 2970 2971 /* 2972 * vm_map_entry_unwire: [ internal use only ] 2973 * 2974 * Make the region specified by this entry pageable. 2975 * 2976 * The map in question should be locked. 2977 * [This is the reason for this routine's existence.] 2978 */ 2979 static void 2980 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2981 { 2982 2983 VM_MAP_ASSERT_LOCKED(map); 2984 KASSERT(entry->wired_count > 0, 2985 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 2986 pmap_unwire(map->pmap, entry->start, entry->end); 2987 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 2988 entry->start, PQ_ACTIVE); 2989 entry->wired_count = 0; 2990 } 2991 2992 static void 2993 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2994 { 2995 2996 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2997 vm_object_deallocate(entry->object.vm_object); 2998 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 2999 } 3000 3001 /* 3002 * vm_map_entry_delete: [ internal use only ] 3003 * 3004 * Deallocate the given entry from the target map.
3005 */ 3006 static void 3007 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3008 { 3009 vm_object_t object; 3010 vm_pindex_t offidxstart, offidxend, count, size1; 3011 vm_size_t size; 3012 3013 vm_map_entry_unlink(map, entry); 3014 object = entry->object.vm_object; 3015 3016 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3017 MPASS(entry->cred == NULL); 3018 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3019 MPASS(object == NULL); 3020 vm_map_entry_deallocate(entry, map->system_map); 3021 return; 3022 } 3023 3024 size = entry->end - entry->start; 3025 map->size -= size; 3026 3027 if (entry->cred != NULL) { 3028 swap_release_by_cred(size, entry->cred); 3029 crfree(entry->cred); 3030 } 3031 3032 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 3033 (object != NULL)) { 3034 KASSERT(entry->cred == NULL || object->cred == NULL || 3035 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3036 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3037 count = atop(size); 3038 offidxstart = OFF_TO_IDX(entry->offset); 3039 offidxend = offidxstart + count; 3040 VM_OBJECT_WLOCK(object); 3041 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 3042 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 3043 object == kernel_object)) { 3044 vm_object_collapse(object); 3045 3046 /* 3047 * The option OBJPR_NOTMAPPED can be passed here 3048 * because vm_map_delete() already performed 3049 * pmap_remove() on the only mapping to this range 3050 * of pages. 3051 */ 3052 vm_object_page_remove(object, offidxstart, offidxend, 3053 OBJPR_NOTMAPPED); 3054 if (object->type == OBJT_SWAP) 3055 swap_pager_freespace(object, offidxstart, 3056 count); 3057 if (offidxend >= object->size && 3058 offidxstart < object->size) { 3059 size1 = object->size; 3060 object->size = offidxstart; 3061 if (object->cred != NULL) { 3062 size1 -= object->size; 3063 KASSERT(object->charge >= ptoa(size1), 3064 ("object %p charge < 0", object)); 3065 swap_release_by_cred(ptoa(size1), 3066 object->cred); 3067 object->charge -= ptoa(size1); 3068 } 3069 } 3070 } 3071 VM_OBJECT_WUNLOCK(object); 3072 } else 3073 entry->object.vm_object = NULL; 3074 if (map->system_map) 3075 vm_map_entry_deallocate(entry, TRUE); 3076 else { 3077 entry->next = curthread->td_map_def_user; 3078 curthread->td_map_def_user = entry; 3079 } 3080 } 3081 3082 /* 3083 * vm_map_delete: [ internal use only ] 3084 * 3085 * Deallocates the given address range from the target 3086 * map. 3087 */ 3088 int 3089 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3090 { 3091 vm_map_entry_t entry; 3092 vm_map_entry_t first_entry; 3093 3094 VM_MAP_ASSERT_LOCKED(map); 3095 if (start == end) 3096 return (KERN_SUCCESS); 3097 3098 /* 3099 * Find the start of the region, and clip it 3100 */ 3101 if (!vm_map_lookup_entry(map, start, &first_entry)) 3102 entry = first_entry->next; 3103 else { 3104 entry = first_entry; 3105 vm_map_clip_start(map, entry, start); 3106 } 3107 3108 /* 3109 * Step through all entries in this region 3110 */ 3111 while (entry->start < end) { 3112 vm_map_entry_t next; 3113 3114 /* 3115 * Wait for wiring or unwiring of an entry to complete. 3116 * Also wait for any system wirings to disappear on 3117 * user maps. 
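 *
 * (The wait below uses the same MAP_ENTRY_IN_TRANSITION and
 * MAP_ENTRY_NEEDS_WAKEUP handshake that vm_map_wire() and
 * vm_map_unwire() rely on above.)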
3118 */ 3119 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3120 (vm_map_pmap(map) != kernel_pmap && 3121 vm_map_entry_system_wired_count(entry) != 0)) { 3122 unsigned int last_timestamp; 3123 vm_offset_t saved_start; 3124 vm_map_entry_t tmp_entry; 3125 3126 saved_start = entry->start; 3127 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3128 last_timestamp = map->timestamp; 3129 (void) vm_map_unlock_and_wait(map, 0); 3130 vm_map_lock(map); 3131 if (last_timestamp + 1 != map->timestamp) { 3132 /* 3133 * Look again for the entry because the map was 3134 * modified while it was unlocked. 3135 * Specifically, the entry may have been 3136 * clipped, merged, or deleted. 3137 */ 3138 if (!vm_map_lookup_entry(map, saved_start, 3139 &tmp_entry)) 3140 entry = tmp_entry->next; 3141 else { 3142 entry = tmp_entry; 3143 vm_map_clip_start(map, entry, 3144 saved_start); 3145 } 3146 } 3147 continue; 3148 } 3149 vm_map_clip_end(map, entry, end); 3150 3151 next = entry->next; 3152 3153 /* 3154 * Unwire before removing addresses from the pmap; otherwise, 3155 * unwiring will put the entries back in the pmap. 3156 */ 3157 if (entry->wired_count != 0) { 3158 vm_map_entry_unwire(map, entry); 3159 } 3160 3161 pmap_remove(map->pmap, entry->start, entry->end); 3162 3163 /* 3164 * Delete the entry only after removing all pmap 3165 * entries pointing to its pages. (Otherwise, its 3166 * page frames may be reallocated, and any modify bits 3167 * will be set in the wrong object!) 3168 */ 3169 vm_map_entry_delete(map, entry); 3170 entry = next; 3171 } 3172 return (KERN_SUCCESS); 3173 } 3174 3175 /* 3176 * vm_map_remove: 3177 * 3178 * Remove the given address range from the target map. 3179 * This is the exported form of vm_map_delete. 3180 */ 3181 int 3182 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3183 { 3184 int result; 3185 3186 vm_map_lock(map); 3187 VM_MAP_RANGE_CHECK(map, start, end); 3188 result = vm_map_delete(map, start, end); 3189 vm_map_unlock(map); 3190 return (result); 3191 } 3192 3193 /* 3194 * vm_map_check_protection: 3195 * 3196 * Assert that the target map allows the specified privilege on the 3197 * entire address region given. The entire region must be allocated. 3198 * 3199 * WARNING! This code does not and should not check whether the 3200 * contents of the region is accessible. For example a smaller file 3201 * might be mapped into a larger address space. 3202 * 3203 * NOTE! This code is also called by munmap(). 3204 * 3205 * The map must be locked. A read lock is sufficient. 3206 */ 3207 boolean_t 3208 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3209 vm_prot_t protection) 3210 { 3211 vm_map_entry_t entry; 3212 vm_map_entry_t tmp_entry; 3213 3214 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3215 return (FALSE); 3216 entry = tmp_entry; 3217 3218 while (start < end) { 3219 /* 3220 * No holes allowed! 3221 */ 3222 if (start < entry->start) 3223 return (FALSE); 3224 /* 3225 * Check protection associated with entry. 3226 */ 3227 if ((entry->protection & protection) != protection) 3228 return (FALSE); 3229 /* go to next entry */ 3230 start = entry->end; 3231 entry = entry->next; 3232 } 3233 return (TRUE); 3234 } 3235 3236 /* 3237 * vm_map_copy_entry: 3238 * 3239 * Copies the contents of the source entry to the destination 3240 * entry. The entries *must* be aligned properly. 
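 *
 * In outline: when the source entry is unwired or not writable and
 * has a backing object, that object reference is shared and both
 * entries are marked MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY; writable
 * wired entries are instead copied immediately through
 * vm_fault_copy_entry(), so the copied pages remain pageable.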
3241 */ 3242 static void 3243 vm_map_copy_entry( 3244 vm_map_t src_map, 3245 vm_map_t dst_map, 3246 vm_map_entry_t src_entry, 3247 vm_map_entry_t dst_entry, 3248 vm_ooffset_t *fork_charge) 3249 { 3250 vm_object_t src_object; 3251 vm_map_entry_t fake_entry; 3252 vm_offset_t size; 3253 struct ucred *cred; 3254 int charged; 3255 3256 VM_MAP_ASSERT_LOCKED(dst_map); 3257 3258 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3259 return; 3260 3261 if (src_entry->wired_count == 0 || 3262 (src_entry->protection & VM_PROT_WRITE) == 0) { 3263 /* 3264 * If the source entry is marked needs_copy, it is already 3265 * write-protected. 3266 */ 3267 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3268 (src_entry->protection & VM_PROT_WRITE) != 0) { 3269 pmap_protect(src_map->pmap, 3270 src_entry->start, 3271 src_entry->end, 3272 src_entry->protection & ~VM_PROT_WRITE); 3273 } 3274 3275 /* 3276 * Make a copy of the object. 3277 */ 3278 size = src_entry->end - src_entry->start; 3279 if ((src_object = src_entry->object.vm_object) != NULL) { 3280 VM_OBJECT_WLOCK(src_object); 3281 charged = ENTRY_CHARGED(src_entry); 3282 if (src_object->handle == NULL && 3283 (src_object->type == OBJT_DEFAULT || 3284 src_object->type == OBJT_SWAP)) { 3285 vm_object_collapse(src_object); 3286 if ((src_object->flags & (OBJ_NOSPLIT | 3287 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3288 vm_object_split(src_entry); 3289 src_object = 3290 src_entry->object.vm_object; 3291 } 3292 } 3293 vm_object_reference_locked(src_object); 3294 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3295 if (src_entry->cred != NULL && 3296 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3297 KASSERT(src_object->cred == NULL, 3298 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3299 src_object)); 3300 src_object->cred = src_entry->cred; 3301 src_object->charge = size; 3302 } 3303 VM_OBJECT_WUNLOCK(src_object); 3304 dst_entry->object.vm_object = src_object; 3305 if (charged) { 3306 cred = curthread->td_ucred; 3307 crhold(cred); 3308 dst_entry->cred = cred; 3309 *fork_charge += size; 3310 if (!(src_entry->eflags & 3311 MAP_ENTRY_NEEDS_COPY)) { 3312 crhold(cred); 3313 src_entry->cred = cred; 3314 *fork_charge += size; 3315 } 3316 } 3317 src_entry->eflags |= MAP_ENTRY_COW | 3318 MAP_ENTRY_NEEDS_COPY; 3319 dst_entry->eflags |= MAP_ENTRY_COW | 3320 MAP_ENTRY_NEEDS_COPY; 3321 dst_entry->offset = src_entry->offset; 3322 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3323 /* 3324 * MAP_ENTRY_VN_WRITECNT cannot 3325 * indicate write reference from 3326 * src_entry, since the entry is 3327 * marked as needs copy. Allocate a 3328 * fake entry that is used to 3329 * decrement object->un_pager.vnp.writecount 3330 * at the appropriate time. Attach 3331 * fake_entry to the deferred list. 
3332 */ 3333 fake_entry = vm_map_entry_create(dst_map); 3334 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3335 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3336 vm_object_reference(src_object); 3337 fake_entry->object.vm_object = src_object; 3338 fake_entry->start = src_entry->start; 3339 fake_entry->end = src_entry->end; 3340 fake_entry->next = curthread->td_map_def_user; 3341 curthread->td_map_def_user = fake_entry; 3342 } 3343 3344 pmap_copy(dst_map->pmap, src_map->pmap, 3345 dst_entry->start, dst_entry->end - dst_entry->start, 3346 src_entry->start); 3347 } else { 3348 dst_entry->object.vm_object = NULL; 3349 dst_entry->offset = 0; 3350 if (src_entry->cred != NULL) { 3351 dst_entry->cred = curthread->td_ucred; 3352 crhold(dst_entry->cred); 3353 *fork_charge += size; 3354 } 3355 } 3356 } else { 3357 /* 3358 * We don't want to make writeable wired pages copy-on-write. 3359 * Immediately copy these pages into the new map by simulating 3360 * page faults. The new pages are pageable. 3361 */ 3362 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3363 fork_charge); 3364 } 3365 } 3366 3367 /* 3368 * vmspace_map_entry_forked: 3369 * Update the newly-forked vmspace each time a map entry is inherited 3370 * or copied. The values for vm_dsize and vm_tsize are approximate 3371 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3372 */ 3373 static void 3374 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3375 vm_map_entry_t entry) 3376 { 3377 vm_size_t entrysize; 3378 vm_offset_t newend; 3379 3380 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3381 return; 3382 entrysize = entry->end - entry->start; 3383 vm2->vm_map.size += entrysize; 3384 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3385 vm2->vm_ssize += btoc(entrysize); 3386 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3387 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3388 newend = MIN(entry->end, 3389 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3390 vm2->vm_dsize += btoc(newend - entry->start); 3391 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3392 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3393 newend = MIN(entry->end, 3394 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3395 vm2->vm_tsize += btoc(newend - entry->start); 3396 } 3397 } 3398 3399 /* 3400 * vmspace_fork: 3401 * Create a new process vmspace structure and vm_map 3402 * based on those of an existing process. The new map 3403 * is based on the old map, according to the inheritance 3404 * values on the regions in that map. 3405 * 3406 * XXX It might be worth coalescing the entries added to the new vmspace. 3407 * 3408 * The source map must not be locked. 3409 */ 3410 struct vmspace * 3411 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3412 { 3413 struct vmspace *vm2; 3414 vm_map_t new_map, old_map; 3415 vm_map_entry_t new_entry, old_entry; 3416 vm_object_t object; 3417 int locked; 3418 vm_inherit_t inh; 3419 3420 old_map = &vm1->vm_map; 3421 /* Copy immutable fields of vm1 to vm2. 
*/ 3422 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL); 3423 if (vm2 == NULL) 3424 return (NULL); 3425 vm2->vm_taddr = vm1->vm_taddr; 3426 vm2->vm_daddr = vm1->vm_daddr; 3427 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3428 vm_map_lock(old_map); 3429 if (old_map->busy) 3430 vm_map_wait_busy(old_map); 3431 new_map = &vm2->vm_map; 3432 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3433 KASSERT(locked, ("vmspace_fork: lock failed")); 3434 3435 old_entry = old_map->header.next; 3436 3437 while (old_entry != &old_map->header) { 3438 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3439 panic("vm_map_fork: encountered a submap"); 3440 3441 inh = old_entry->inheritance; 3442 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3443 inh != VM_INHERIT_NONE) 3444 inh = VM_INHERIT_COPY; 3445 3446 switch (inh) { 3447 case VM_INHERIT_NONE: 3448 break; 3449 3450 case VM_INHERIT_SHARE: 3451 /* 3452 * Clone the entry, creating the shared object if necessary. 3453 */ 3454 object = old_entry->object.vm_object; 3455 if (object == NULL) { 3456 object = vm_object_allocate(OBJT_DEFAULT, 3457 atop(old_entry->end - old_entry->start)); 3458 old_entry->object.vm_object = object; 3459 old_entry->offset = 0; 3460 if (old_entry->cred != NULL) { 3461 object->cred = old_entry->cred; 3462 object->charge = old_entry->end - 3463 old_entry->start; 3464 old_entry->cred = NULL; 3465 } 3466 } 3467 3468 /* 3469 * Add the reference before calling vm_object_shadow 3470 * to insure that a shadow object is created. 3471 */ 3472 vm_object_reference(object); 3473 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3474 vm_object_shadow(&old_entry->object.vm_object, 3475 &old_entry->offset, 3476 old_entry->end - old_entry->start); 3477 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3478 /* Transfer the second reference too. */ 3479 vm_object_reference( 3480 old_entry->object.vm_object); 3481 3482 /* 3483 * As in vm_map_simplify_entry(), the 3484 * vnode lock will not be acquired in 3485 * this call to vm_object_deallocate(). 3486 */ 3487 vm_object_deallocate(object); 3488 object = old_entry->object.vm_object; 3489 } 3490 VM_OBJECT_WLOCK(object); 3491 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3492 if (old_entry->cred != NULL) { 3493 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3494 object->cred = old_entry->cred; 3495 object->charge = old_entry->end - old_entry->start; 3496 old_entry->cred = NULL; 3497 } 3498 3499 /* 3500 * Assert the correct state of the vnode 3501 * v_writecount while the object is locked, to 3502 * not relock it later for the assertion 3503 * correctness. 3504 */ 3505 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3506 object->type == OBJT_VNODE) { 3507 KASSERT(((struct vnode *)object->handle)-> 3508 v_writecount > 0, 3509 ("vmspace_fork: v_writecount %p", object)); 3510 KASSERT(object->un_pager.vnp.writemappings > 0, 3511 ("vmspace_fork: vnp.writecount %p", 3512 object)); 3513 } 3514 VM_OBJECT_WUNLOCK(object); 3515 3516 /* 3517 * Clone the entry, referencing the shared object. 3518 */ 3519 new_entry = vm_map_entry_create(new_map); 3520 *new_entry = *old_entry; 3521 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3522 MAP_ENTRY_IN_TRANSITION); 3523 new_entry->wiring_thread = NULL; 3524 new_entry->wired_count = 0; 3525 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3526 vnode_pager_update_writecount(object, 3527 new_entry->start, new_entry->end); 3528 } 3529 3530 /* 3531 * Insert the entry into the new map -- we know we're 3532 * inserting at the end of the new map. 
3533 */ 3534 vm_map_entry_link(new_map, new_map->header.prev, 3535 new_entry); 3536 vmspace_map_entry_forked(vm1, vm2, new_entry); 3537 3538 /* 3539 * Update the physical map 3540 */ 3541 pmap_copy(new_map->pmap, old_map->pmap, 3542 new_entry->start, 3543 (old_entry->end - old_entry->start), 3544 old_entry->start); 3545 break; 3546 3547 case VM_INHERIT_COPY: 3548 /* 3549 * Clone the entry and link into the map. 3550 */ 3551 new_entry = vm_map_entry_create(new_map); 3552 *new_entry = *old_entry; 3553 /* 3554 * Copied entry is COW over the old object. 3555 */ 3556 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3557 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3558 new_entry->wiring_thread = NULL; 3559 new_entry->wired_count = 0; 3560 new_entry->object.vm_object = NULL; 3561 new_entry->cred = NULL; 3562 vm_map_entry_link(new_map, new_map->header.prev, 3563 new_entry); 3564 vmspace_map_entry_forked(vm1, vm2, new_entry); 3565 vm_map_copy_entry(old_map, new_map, old_entry, 3566 new_entry, fork_charge); 3567 break; 3568 3569 case VM_INHERIT_ZERO: 3570 /* 3571 * Create a new anonymous mapping entry modelled from 3572 * the old one. 3573 */ 3574 new_entry = vm_map_entry_create(new_map); 3575 memset(new_entry, 0, sizeof(*new_entry)); 3576 3577 new_entry->start = old_entry->start; 3578 new_entry->end = old_entry->end; 3579 new_entry->eflags = old_entry->eflags & 3580 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3581 MAP_ENTRY_VN_WRITECNT); 3582 new_entry->protection = old_entry->protection; 3583 new_entry->max_protection = old_entry->max_protection; 3584 new_entry->inheritance = VM_INHERIT_ZERO; 3585 3586 vm_map_entry_link(new_map, new_map->header.prev, 3587 new_entry); 3588 vmspace_map_entry_forked(vm1, vm2, new_entry); 3589 3590 new_entry->cred = curthread->td_ucred; 3591 crhold(new_entry->cred); 3592 *fork_charge += (new_entry->end - new_entry->start); 3593 3594 break; 3595 } 3596 old_entry = old_entry->next; 3597 } 3598 /* 3599 * Use inlined vm_map_unlock() to postpone handling the deferred 3600 * map entries, which cannot be done until both old_map and 3601 * new_map locks are released. 3602 */ 3603 sx_xunlock(&old_map->lock); 3604 sx_xunlock(&new_map->lock); 3605 vm_map_process_deferred(); 3606 3607 return (vm2); 3608 } 3609 3610 /* 3611 * Create a process's stack for exec_new_vmspace(). This function is never 3612 * asked to wire the newly created stack. 3613 */ 3614 int 3615 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3616 vm_prot_t prot, vm_prot_t max, int cow) 3617 { 3618 vm_size_t growsize, init_ssize; 3619 rlim_t vmemlim; 3620 int rv; 3621 3622 MPASS((map->flags & MAP_WIREFUTURE) == 0); 3623 growsize = sgrowsiz; 3624 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 3625 vm_map_lock(map); 3626 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3627 /* If we would blow our VMEM resource limit, no go */ 3628 if (map->size + init_ssize > vmemlim) { 3629 rv = KERN_NO_SPACE; 3630 goto out; 3631 } 3632 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 3633 max, cow); 3634 out: 3635 vm_map_unlock(map); 3636 return (rv); 3637 } 3638 3639 static int stack_guard_page = 1; 3640 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 3641 &stack_guard_page, 0, 3642 "Specifies the number of guard pages for a stack that grows"); 3643 3644 static int 3645 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3646 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 3647 { 3648 vm_map_entry_t new_entry, prev_entry; 3649 vm_offset_t bot, gap_bot, gap_top, top; 3650 vm_size_t init_ssize, sgp; 3651 int orient, rv; 3652 3653 /* 3654 * The stack orientation is piggybacked with the cow argument. 3655 * Extract it into orient and mask the cow argument so that we 3656 * don't pass it around further. 3657 */ 3658 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 3659 KASSERT(orient != 0, ("No stack grow direction")); 3660 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 3661 ("bi-dir stack")); 3662 3663 if (addrbos < vm_map_min(map) || 3664 addrbos + max_ssize > vm_map_max(map) || 3665 addrbos + max_ssize <= addrbos) 3666 return (KERN_INVALID_ADDRESS); 3667 sgp = (vm_size_t)stack_guard_page * PAGE_SIZE; 3668 if (sgp >= max_ssize) 3669 return (KERN_INVALID_ARGUMENT); 3670 3671 init_ssize = growsize; 3672 if (max_ssize < init_ssize + sgp) 3673 init_ssize = max_ssize - sgp; 3674 3675 /* If addr is already mapped, no go */ 3676 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 3677 return (KERN_NO_SPACE); 3678 3679 /* 3680 * If we can't accommodate max_ssize in the current mapping, no go. 3681 */ 3682 if (prev_entry->next->start < addrbos + max_ssize) 3683 return (KERN_NO_SPACE); 3684 3685 /* 3686 * We initially map a stack of only init_ssize. We will grow as 3687 * needed later. Depending on the orientation of the stack (i.e. 3688 * the grow direction) we either map at the top of the range, the 3689 * bottom of the range or in the middle. 3690 * 3691 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3692 * and cow to be 0. Possibly we should eliminate these as input 3693 * parameters, and just pass these values here in the insert call. 3694 */ 3695 if (orient == MAP_STACK_GROWS_DOWN) { 3696 bot = addrbos + max_ssize - init_ssize; 3697 top = bot + init_ssize; 3698 gap_bot = addrbos; 3699 gap_top = bot; 3700 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 3701 bot = addrbos; 3702 top = bot + init_ssize; 3703 gap_bot = top; 3704 gap_top = addrbos + max_ssize; 3705 } 3706 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3707 if (rv != KERN_SUCCESS) 3708 return (rv); 3709 new_entry = prev_entry->next; 3710 KASSERT(new_entry->end == top || new_entry->start == bot, 3711 ("Bad entry start/end for new stack entry")); 3712 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3713 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3714 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3715 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3716 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3717 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3718 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 3719 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 
3720 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 3721 if (rv != KERN_SUCCESS) 3722 (void)vm_map_delete(map, bot, top); 3723 return (rv); 3724 } 3725 3726 /* 3727 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 3728 * successfully grow the stack. 3729 */ 3730 static int 3731 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 3732 { 3733 vm_map_entry_t stack_entry; 3734 struct proc *p; 3735 struct vmspace *vm; 3736 struct ucred *cred; 3737 vm_offset_t gap_end, gap_start, grow_start; 3738 size_t grow_amount, guard, max_grow; 3739 rlim_t lmemlim, stacklim, vmemlim; 3740 int rv, rv1; 3741 bool gap_deleted, grow_down, is_procstack; 3742 #ifdef notyet 3743 uint64_t limit; 3744 #endif 3745 #ifdef RACCT 3746 int error; 3747 #endif 3748 3749 p = curproc; 3750 vm = p->p_vmspace; 3751 3752 /* 3753 * Disallow stack growth when the access is performed by a 3754 * debugger or AIO daemon. The reason is that the wrong 3755 * resource limits are applied. 3756 */ 3757 if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL) 3758 return (KERN_FAILURE); 3759 3760 MPASS(!map->system_map); 3761 3762 guard = stack_guard_page * PAGE_SIZE; 3763 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 3764 stacklim = lim_cur(curthread, RLIMIT_STACK); 3765 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3766 retry: 3767 /* If addr is not in a hole for a stack grow area, no need to grow. */ 3768 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 3769 return (KERN_FAILURE); 3770 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 3771 return (KERN_SUCCESS); 3772 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 3773 stack_entry = gap_entry->next; 3774 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 3775 stack_entry->start != gap_entry->end) 3776 return (KERN_FAILURE); 3777 grow_amount = round_page(stack_entry->start - addr); 3778 grow_down = true; 3779 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 3780 stack_entry = gap_entry->prev; 3781 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 3782 stack_entry->end != gap_entry->start) 3783 return (KERN_FAILURE); 3784 grow_amount = round_page(addr + 1 - stack_entry->end); 3785 grow_down = false; 3786 } else { 3787 return (KERN_FAILURE); 3788 } 3789 max_grow = gap_entry->end - gap_entry->start; 3790 if (guard > max_grow) 3791 return (KERN_NO_SPACE); 3792 max_grow -= guard; 3793 if (grow_amount > max_grow) 3794 return (KERN_NO_SPACE); 3795 3796 /* 3797 * If this is the main process stack, see if we're over the stack 3798 * limit. 
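 *
 * (stacklim was fetched above as the soft RLIMIT_STACK value; the
 * comparison below is done in bytes, ctob(vm->vm_ssize) plus the
 * requested growth.)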
3799 */ 3800 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 3801 addr < (vm_offset_t)p->p_sysent->sv_usrstack; 3802 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 3803 return (KERN_NO_SPACE); 3804 3805 #ifdef RACCT 3806 if (racct_enable) { 3807 PROC_LOCK(p); 3808 if (is_procstack && racct_set(p, RACCT_STACK, 3809 ctob(vm->vm_ssize) + grow_amount)) { 3810 PROC_UNLOCK(p); 3811 return (KERN_NO_SPACE); 3812 } 3813 PROC_UNLOCK(p); 3814 } 3815 #endif 3816 3817 grow_amount = roundup(grow_amount, sgrowsiz); 3818 if (grow_amount > max_grow) 3819 grow_amount = max_grow; 3820 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3821 grow_amount = trunc_page((vm_size_t)stacklim) - 3822 ctob(vm->vm_ssize); 3823 } 3824 3825 #ifdef notyet 3826 PROC_LOCK(p); 3827 limit = racct_get_available(p, RACCT_STACK); 3828 PROC_UNLOCK(p); 3829 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 3830 grow_amount = limit - ctob(vm->vm_ssize); 3831 #endif 3832 3833 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 3834 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 3835 rv = KERN_NO_SPACE; 3836 goto out; 3837 } 3838 #ifdef RACCT 3839 if (racct_enable) { 3840 PROC_LOCK(p); 3841 if (racct_set(p, RACCT_MEMLOCK, 3842 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 3843 PROC_UNLOCK(p); 3844 rv = KERN_NO_SPACE; 3845 goto out; 3846 } 3847 PROC_UNLOCK(p); 3848 } 3849 #endif 3850 } 3851 3852 /* If we would blow our VMEM resource limit, no go */ 3853 if (map->size + grow_amount > vmemlim) { 3854 rv = KERN_NO_SPACE; 3855 goto out; 3856 } 3857 #ifdef RACCT 3858 if (racct_enable) { 3859 PROC_LOCK(p); 3860 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 3861 PROC_UNLOCK(p); 3862 rv = KERN_NO_SPACE; 3863 goto out; 3864 } 3865 PROC_UNLOCK(p); 3866 } 3867 #endif 3868 3869 if (vm_map_lock_upgrade(map)) { 3870 gap_entry = NULL; 3871 vm_map_lock_read(map); 3872 goto retry; 3873 } 3874 3875 if (grow_down) { 3876 grow_start = gap_entry->end - grow_amount; 3877 if (gap_entry->start + grow_amount == gap_entry->end) { 3878 gap_start = gap_entry->start; 3879 gap_end = gap_entry->end; 3880 vm_map_entry_delete(map, gap_entry); 3881 gap_deleted = true; 3882 } else { 3883 MPASS(gap_entry->start < gap_entry->end - grow_amount); 3884 gap_entry->end -= grow_amount; 3885 vm_map_entry_resize_free(map, gap_entry); 3886 gap_deleted = false; 3887 } 3888 rv = vm_map_insert(map, NULL, 0, grow_start, 3889 grow_start + grow_amount, 3890 stack_entry->protection, stack_entry->max_protection, 3891 MAP_STACK_GROWS_DOWN); 3892 if (rv != KERN_SUCCESS) { 3893 if (gap_deleted) { 3894 rv1 = vm_map_insert(map, NULL, 0, gap_start, 3895 gap_end, VM_PROT_NONE, VM_PROT_NONE, 3896 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN); 3897 MPASS(rv1 == KERN_SUCCESS); 3898 } else { 3899 gap_entry->end += grow_amount; 3900 vm_map_entry_resize_free(map, gap_entry); 3901 } 3902 } 3903 } else { 3904 grow_start = stack_entry->end; 3905 cred = stack_entry->cred; 3906 if (cred == NULL && stack_entry->object.vm_object != NULL) 3907 cred = stack_entry->object.vm_object->cred; 3908 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 3909 rv = KERN_NO_SPACE; 3910 /* Grow the underlying object if applicable. 
                /* Grow the underlying object if applicable. */
                else if (stack_entry->object.vm_object == NULL ||
                    vm_object_coalesce(stack_entry->object.vm_object,
                    stack_entry->offset,
                    (vm_size_t)(stack_entry->end - stack_entry->start),
                    (vm_size_t)grow_amount, cred != NULL)) {
                        if (gap_entry->start + grow_amount == gap_entry->end)
                                vm_map_entry_delete(map, gap_entry);
                        else
                                gap_entry->start += grow_amount;
                        stack_entry->end += grow_amount;
                        map->size += grow_amount;
                        vm_map_entry_resize_free(map, stack_entry);
                        rv = KERN_SUCCESS;
                } else
                        rv = KERN_FAILURE;
        }
        if (rv == KERN_SUCCESS && is_procstack)
                vm->vm_ssize += btoc(grow_amount);

        /*
         * Heed the MAP_WIREFUTURE flag if it was set for this process.
         */
        if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
                vm_map_unlock(map);
                vm_map_wire(map, grow_start, grow_start + grow_amount,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
                vm_map_lock_read(map);
        } else
                vm_map_lock_downgrade(map);

out:
#ifdef RACCT
        if (racct_enable && rv != KERN_SUCCESS) {
                PROC_LOCK(p);
                error = racct_set(p, RACCT_VMEM, map->size);
                KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
                if (!old_mlock) {
                        error = racct_set(p, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)));
                        KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
                }
                error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
                KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
                PROC_UNLOCK(p);
        }
#endif

        return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace starts out empty.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;

        KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
            ("vmspace_exec recursed"));
        newvmspace = vmspace_alloc(minuser, maxuser, NULL);
        if (newvmspace == NULL)
                return (ENOMEM);
        newvmspace->vm_swrss = oldvmspace->vm_swrss;
        /*
         * This code is written like this for prototype purposes.  The
         * goal is to avoid running down the vmspace here, but to let the
         * other processes that are still using the vmspace finally run
         * it down.  Even though there is little or no chance of blocking
         * here, it is a good idea to keep this form for future mods.
         */
        PROC_VMSPACE_LOCK(p);
        p->p_vmspace = newvmspace;
        PROC_VMSPACE_UNLOCK(p);
        if (p == curthread->td_proc)
                pmap_activate(curthread);
        curthread->td_pflags |= TDP_EXECVMSPC;
        return (0);
}
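
/*
 * Illustrative sketch only (not part of this file): roughly how an exec
 * path is expected to pair with vmspace_exec().  The variable names and
 * the surrounding error handling here are assumptions for illustration;
 * the authoritative caller is the exec code, which releases the old
 * vmspace once TDP_EXECVMSPC indicates the deferred rundown is pending.
 *
 *	struct vmspace *oldvmspace = p->p_vmspace;
 *	int error;
 *
 *	error = vmspace_exec(p, sv_minuser, sv_maxuser);
 *	if (error != 0)
 *		return (error);
 *	// ... build the new image in p->p_vmspace ...
 *	if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
 *		// vmspace_exec() deferred freeing the old vmspace; drop
 *		// the reference now that the switch is complete.
 *		vmspace_free(oldvmspace);
 *		curthread->td_pflags &= ~TDP_EXECVMSPC;
 *	}
 */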
3996 */ 3997 int 3998 vmspace_unshare(struct proc *p) 3999 { 4000 struct vmspace *oldvmspace = p->p_vmspace; 4001 struct vmspace *newvmspace; 4002 vm_ooffset_t fork_charge; 4003 4004 if (oldvmspace->vm_refcnt == 1) 4005 return (0); 4006 fork_charge = 0; 4007 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4008 if (newvmspace == NULL) 4009 return (ENOMEM); 4010 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4011 vmspace_free(newvmspace); 4012 return (ENOMEM); 4013 } 4014 PROC_VMSPACE_LOCK(p); 4015 p->p_vmspace = newvmspace; 4016 PROC_VMSPACE_UNLOCK(p); 4017 if (p == curthread->td_proc) 4018 pmap_activate(curthread); 4019 vmspace_free(oldvmspace); 4020 return (0); 4021 } 4022 4023 /* 4024 * vm_map_lookup: 4025 * 4026 * Finds the VM object, offset, and 4027 * protection for a given virtual address in the 4028 * specified map, assuming a page fault of the 4029 * type specified. 4030 * 4031 * Leaves the map in question locked for read; return 4032 * values are guaranteed until a vm_map_lookup_done 4033 * call is performed. Note that the map argument 4034 * is in/out; the returned map must be used in 4035 * the call to vm_map_lookup_done. 4036 * 4037 * A handle (out_entry) is returned for use in 4038 * vm_map_lookup_done, to make that fast. 4039 * 4040 * If a lookup is requested with "write protection" 4041 * specified, the map may be changed to perform virtual 4042 * copying operations, although the data referenced will 4043 * remain the same. 4044 */ 4045 int 4046 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4047 vm_offset_t vaddr, 4048 vm_prot_t fault_typea, 4049 vm_map_entry_t *out_entry, /* OUT */ 4050 vm_object_t *object, /* OUT */ 4051 vm_pindex_t *pindex, /* OUT */ 4052 vm_prot_t *out_prot, /* OUT */ 4053 boolean_t *wired) /* OUT */ 4054 { 4055 vm_map_entry_t entry; 4056 vm_map_t map = *var_map; 4057 vm_prot_t prot; 4058 vm_prot_t fault_type = fault_typea; 4059 vm_object_t eobject; 4060 vm_size_t size; 4061 struct ucred *cred; 4062 4063 RetryLookup: 4064 4065 vm_map_lock_read(map); 4066 4067 RetryLookupLocked: 4068 /* 4069 * Lookup the faulting address. 4070 */ 4071 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 4072 vm_map_unlock_read(map); 4073 return (KERN_INVALID_ADDRESS); 4074 } 4075 4076 entry = *out_entry; 4077 4078 /* 4079 * Handle submaps. 4080 */ 4081 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4082 vm_map_t old_map = map; 4083 4084 *var_map = map = entry->object.sub_map; 4085 vm_map_unlock_read(old_map); 4086 goto RetryLookup; 4087 } 4088 4089 /* 4090 * Check whether this task is allowed to have this page. 
4091 */ 4092 prot = entry->protection; 4093 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 4094 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 4095 if (prot == VM_PROT_NONE && map != kernel_map && 4096 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 4097 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4098 MAP_ENTRY_STACK_GAP_UP)) != 0 && 4099 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 4100 goto RetryLookupLocked; 4101 } 4102 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4103 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 4104 vm_map_unlock_read(map); 4105 return (KERN_PROTECTION_FAILURE); 4106 } 4107 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 4108 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 4109 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 4110 ("entry %p flags %x", entry, entry->eflags)); 4111 if ((fault_typea & VM_PROT_COPY) != 0 && 4112 (entry->max_protection & VM_PROT_WRITE) == 0 && 4113 (entry->eflags & MAP_ENTRY_COW) == 0) { 4114 vm_map_unlock_read(map); 4115 return (KERN_PROTECTION_FAILURE); 4116 } 4117 4118 /* 4119 * If this page is not pageable, we have to get it for all possible 4120 * accesses. 4121 */ 4122 *wired = (entry->wired_count != 0); 4123 if (*wired) 4124 fault_type = entry->protection; 4125 size = entry->end - entry->start; 4126 /* 4127 * If the entry was copy-on-write, we either ... 4128 */ 4129 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4130 /* 4131 * If we want to write the page, we may as well handle that 4132 * now since we've got the map locked. 4133 * 4134 * If we don't need to write the page, we just demote the 4135 * permissions allowed. 4136 */ 4137 if ((fault_type & VM_PROT_WRITE) != 0 || 4138 (fault_typea & VM_PROT_COPY) != 0) { 4139 /* 4140 * Make a new object, and place it in the object 4141 * chain. Note that no new references have appeared 4142 * -- one just moved from the map to the new 4143 * object. 4144 */ 4145 if (vm_map_lock_upgrade(map)) 4146 goto RetryLookup; 4147 4148 if (entry->cred == NULL) { 4149 /* 4150 * The debugger owner is charged for 4151 * the memory. 4152 */ 4153 cred = curthread->td_ucred; 4154 crhold(cred); 4155 if (!swap_reserve_by_cred(size, cred)) { 4156 crfree(cred); 4157 vm_map_unlock(map); 4158 return (KERN_RESOURCE_SHORTAGE); 4159 } 4160 entry->cred = cred; 4161 } 4162 vm_object_shadow(&entry->object.vm_object, 4163 &entry->offset, size); 4164 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4165 eobject = entry->object.vm_object; 4166 if (eobject->cred != NULL) { 4167 /* 4168 * The object was not shadowed. 4169 */ 4170 swap_release_by_cred(size, entry->cred); 4171 crfree(entry->cred); 4172 entry->cred = NULL; 4173 } else if (entry->cred != NULL) { 4174 VM_OBJECT_WLOCK(eobject); 4175 eobject->cred = entry->cred; 4176 eobject->charge = size; 4177 VM_OBJECT_WUNLOCK(eobject); 4178 entry->cred = NULL; 4179 } 4180 4181 vm_map_lock_downgrade(map); 4182 } else { 4183 /* 4184 * We're attempting to read a copy-on-write page -- 4185 * don't allow writes. 4186 */ 4187 prot &= ~VM_PROT_WRITE; 4188 } 4189 } 4190 4191 /* 4192 * Create an object if necessary. 
4193 */ 4194 if (entry->object.vm_object == NULL && 4195 !map->system_map) { 4196 if (vm_map_lock_upgrade(map)) 4197 goto RetryLookup; 4198 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 4199 atop(size)); 4200 entry->offset = 0; 4201 if (entry->cred != NULL) { 4202 VM_OBJECT_WLOCK(entry->object.vm_object); 4203 entry->object.vm_object->cred = entry->cred; 4204 entry->object.vm_object->charge = size; 4205 VM_OBJECT_WUNLOCK(entry->object.vm_object); 4206 entry->cred = NULL; 4207 } 4208 vm_map_lock_downgrade(map); 4209 } 4210 4211 /* 4212 * Return the object/offset from this entry. If the entry was 4213 * copy-on-write or empty, it has been fixed up. 4214 */ 4215 *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset); 4216 *object = entry->object.vm_object; 4217 4218 *out_prot = prot; 4219 return (KERN_SUCCESS); 4220 } 4221 4222 /* 4223 * vm_map_lookup_locked: 4224 * 4225 * Lookup the faulting address. A version of vm_map_lookup that returns 4226 * KERN_FAILURE instead of blocking on map lock or memory allocation. 4227 */ 4228 int 4229 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 4230 vm_offset_t vaddr, 4231 vm_prot_t fault_typea, 4232 vm_map_entry_t *out_entry, /* OUT */ 4233 vm_object_t *object, /* OUT */ 4234 vm_pindex_t *pindex, /* OUT */ 4235 vm_prot_t *out_prot, /* OUT */ 4236 boolean_t *wired) /* OUT */ 4237 { 4238 vm_map_entry_t entry; 4239 vm_map_t map = *var_map; 4240 vm_prot_t prot; 4241 vm_prot_t fault_type = fault_typea; 4242 4243 /* 4244 * Lookup the faulting address. 4245 */ 4246 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 4247 return (KERN_INVALID_ADDRESS); 4248 4249 entry = *out_entry; 4250 4251 /* 4252 * Fail if the entry refers to a submap. 4253 */ 4254 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 4255 return (KERN_FAILURE); 4256 4257 /* 4258 * Check whether this task is allowed to have this page. 4259 */ 4260 prot = entry->protection; 4261 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4262 if ((fault_type & prot) != fault_type) 4263 return (KERN_PROTECTION_FAILURE); 4264 4265 /* 4266 * If this page is not pageable, we have to get it for all possible 4267 * accesses. 4268 */ 4269 *wired = (entry->wired_count != 0); 4270 if (*wired) 4271 fault_type = entry->protection; 4272 4273 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4274 /* 4275 * Fail if the entry was copy-on-write for a write fault. 4276 */ 4277 if (fault_type & VM_PROT_WRITE) 4278 return (KERN_FAILURE); 4279 /* 4280 * We're attempting to read a copy-on-write page -- 4281 * don't allow writes. 4282 */ 4283 prot &= ~VM_PROT_WRITE; 4284 } 4285 4286 /* 4287 * Fail if an object should be created. 4288 */ 4289 if (entry->object.vm_object == NULL && !map->system_map) 4290 return (KERN_FAILURE); 4291 4292 /* 4293 * Return the object/offset from this entry. If the entry was 4294 * copy-on-write or empty, it has been fixed up. 4295 */ 4296 *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset); 4297 *object = entry->object.vm_object; 4298 4299 *out_prot = prot; 4300 return (KERN_SUCCESS); 4301 } 4302 4303 /* 4304 * vm_map_lookup_done: 4305 * 4306 * Releases locks acquired by a vm_map_lookup 4307 * (according to the handle returned by that lookup). 
4308 */ 4309 void 4310 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4311 { 4312 /* 4313 * Unlock the main-level map 4314 */ 4315 vm_map_unlock_read(map); 4316 } 4317 4318 vm_offset_t 4319 vm_map_max_KBI(const struct vm_map *map) 4320 { 4321 4322 return (map->max_offset); 4323 } 4324 4325 vm_offset_t 4326 vm_map_min_KBI(const struct vm_map *map) 4327 { 4328 4329 return (map->min_offset); 4330 } 4331 4332 pmap_t 4333 vm_map_pmap_KBI(vm_map_t map) 4334 { 4335 4336 return (map->pmap); 4337 } 4338 4339 #include "opt_ddb.h" 4340 #ifdef DDB 4341 #include <sys/kernel.h> 4342 4343 #include <ddb/ddb.h> 4344 4345 static void 4346 vm_map_print(vm_map_t map) 4347 { 4348 vm_map_entry_t entry; 4349 4350 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4351 (void *)map, 4352 (void *)map->pmap, map->nentries, map->timestamp); 4353 4354 db_indent += 2; 4355 for (entry = map->header.next; entry != &map->header; 4356 entry = entry->next) { 4357 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 4358 (void *)entry, (void *)entry->start, (void *)entry->end, 4359 entry->eflags); 4360 { 4361 static char *inheritance_name[4] = 4362 {"share", "copy", "none", "donate_copy"}; 4363 4364 db_iprintf(" prot=%x/%x/%s", 4365 entry->protection, 4366 entry->max_protection, 4367 inheritance_name[(int)(unsigned char)entry->inheritance]); 4368 if (entry->wired_count != 0) 4369 db_printf(", wired"); 4370 } 4371 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4372 db_printf(", share=%p, offset=0x%jx\n", 4373 (void *)entry->object.sub_map, 4374 (uintmax_t)entry->offset); 4375 if ((entry->prev == &map->header) || 4376 (entry->prev->object.sub_map != 4377 entry->object.sub_map)) { 4378 db_indent += 2; 4379 vm_map_print((vm_map_t)entry->object.sub_map); 4380 db_indent -= 2; 4381 } 4382 } else { 4383 if (entry->cred != NULL) 4384 db_printf(", ruid %d", entry->cred->cr_ruid); 4385 db_printf(", object=%p, offset=0x%jx", 4386 (void *)entry->object.vm_object, 4387 (uintmax_t)entry->offset); 4388 if (entry->object.vm_object && entry->object.vm_object->cred) 4389 db_printf(", obj ruid %d charge %jx", 4390 entry->object.vm_object->cred->cr_ruid, 4391 (uintmax_t)entry->object.vm_object->charge); 4392 if (entry->eflags & MAP_ENTRY_COW) 4393 db_printf(", copy (%s)", 4394 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4395 db_printf("\n"); 4396 4397 if ((entry->prev == &map->header) || 4398 (entry->prev->object.vm_object != 4399 entry->object.vm_object)) { 4400 db_indent += 2; 4401 vm_object_print((db_expr_t)(intptr_t) 4402 entry->object.vm_object, 4403 0, 0, (char *)0); 4404 db_indent -= 2; 4405 } 4406 } 4407 } 4408 db_indent -= 2; 4409 } 4410 4411 DB_SHOW_COMMAND(map, map) 4412 { 4413 4414 if (!have_addr) { 4415 db_printf("usage: show map <addr>\n"); 4416 return; 4417 } 4418 vm_map_print((vm_map_t)addr); 4419 } 4420 4421 DB_SHOW_COMMAND(procvm, procvm) 4422 { 4423 struct proc *p; 4424 4425 if (have_addr) { 4426 p = db_lookup_proc(addr); 4427 } else { 4428 p = curproc; 4429 } 4430 4431 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4432 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4433 (void *)vmspace_pmap(p->p_vmspace)); 4434 4435 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 4436 } 4437 4438 #endif /* DDB */ 4439