/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a self-adjusting binary search tree of these
 * entries is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
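 *
 * As a hedged illustration of the clipping rule above (addresses are
 * hypothetical): an operation on [B, C) applied to an existing entry
 * covering [A, C) first splits that entry at B, producing entries
 * [A, B) and [B, C), and only the second entry is then modified.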
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static int vm_map_alignspace(vm_map_t map, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
    vm_offset_t max_addr, vm_offset_t alignment);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The AIO kernel processes switch to and from a user process's
 * vmspace while performing an I/O operation on behalf of a user
 * process.
 * The new vmspace is either the vmspace of a user process
 * obtained from an active AIO request or the initial vmspace of the
 * AIO kernel process (when it is idling).  Because user processes
 * will block to drain any active AIO requests before proceeding in
 * exit() or execve(), the vmspace reference count for these vmspaces
 * can never be 0.  This allows for a much simpler implementation than
 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
 * processes hold an extra reference on their initial vmspace for the
 * life of the process so that this guarantee is true for any vmspace
 * passed as 'newvm'.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	/* Remove the daemon's reference to the old address space. */
	KASSERT(oldvm->vm_refcnt > 1,
	    ("vmspace_switch_aio: oldvm dropping last reference"));
	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
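 *
 *	A minimal caller sketch (illustrative only; the predicate shown is
 *	hypothetical).  The lock is reacquired after waking, and the final
 *	ordinary unlock performs the deferred deallocations:
 *
 *		vm_map_lock(map);
 *		while (need_to_wait(map)) {
 *			(void)vm_map_unlock_and_wait(map, 0);
 *			vm_map_lock(map);
 *		}
 *		vm_map_unlock(map);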
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = entry->next->start - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = entry->next->start - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = entry->next->start - entry->end;
	vm_map_entry_set_max_free(entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) ||
	    start >= end)
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (prev_entry->next->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	} else if (prev_entry != &map->header &&
	    (prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_simplify_entry(map, new_entry);

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
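 *
 *	Illustrative sketch of the assumed caller pattern (names other than
 *	the vm_map functions are hypothetical); the lookup and the insert
 *	must happen under the same hold of the map lock:
 *
 *		vm_map_lock(map);
 *		if (vm_map_findspace(map, start, length, &addr) == 0)
 *			rv = vm_map_insert(map, obj, 0, addr, addr + length,
 *			    prot, max, 0);
 *		vm_map_unlock(map);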
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	start = MAX(start, vm_map_min(map));
	if (start + length > vm_map_max(map) || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}

/*
 * Searches for the specified amount of free space in the given map with the
 * specified alignment.  Performs an address-ordered, first-fit search from
 * the given address "*addr", with an optional upper bound "max_addr".  If the
 * parameter "alignment" is zero, then the alignment is computed from the
 * given (object, offset) pair so as to enable the greatest possible use of
 * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 *
 * The map must be locked.  Initially, there must be at least "length" bytes
 * of free space at the given address.
 */
static int
vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
    vm_offset_t alignment)
{
	vm_offset_t aligned_addr, free_addr;

	VM_MAP_ASSERT_LOCKED(map);
	free_addr = *addr;
	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
	    free_addr == *addr, ("caller provided insufficient free space"));
	for (;;) {
		/*
		 * At the start of every iteration, the free space at address
		 * "*addr" is at least "length" bytes.
		 */
		if (alignment == 0)
			pmap_align_superpage(object, offset, addr, length);
		else if ((*addr & (alignment - 1)) != 0) {
			*addr &= ~(alignment - 1);
			*addr += alignment;
		}
		aligned_addr = *addr;
		if (aligned_addr == free_addr) {
			/*
			 * Alignment did not change "*addr", so "*addr" must
			 * still provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}

		/*
		 * Test for address wrap on "*addr".  A wrapped "*addr" could
		 * be a valid address, in which case vm_map_findspace() cannot
		 * be relied upon to fail.
		 */
		if (aligned_addr < free_addr ||
		    vm_map_findspace(map, aligned_addr, length, addr) ||
		    (max_addr != 0 && *addr + length > max_addr))
			return (KERN_NO_SPACE);
		free_addr = *addr;
		if (free_addr == aligned_addr) {
			/*
			 * If a successful call to vm_map_findspace() did not
			 * change "*addr", then "*addr" must still be aligned
			 * and provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}
	}
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
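 *
 *	Hedged usage sketch (argument values are hypothetical); unlike
 *	vm_map_findspace(), this routine takes the map lock itself:
 *
 *		addr = vm_map_min(map);
 *		rv = vm_map_find(map, obj, 0, &addr, length, 0,
 *		    VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 *		    VM_PROT_READ | VM_PROT_WRITE, 0);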
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, vm_offset_t max_addr, int find_space,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, min_addr;
	int rv;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	vm_map_lock(map);
	if (find_space != VMFS_NO_SPACE) {
		KASSERT(find_space == VMFS_ANY_SPACE ||
		    find_space == VMFS_OPTIMAL_SPACE ||
		    find_space == VMFS_SUPER_SPACE ||
		    alignment != 0, ("unexpected VMFS flag"));
		min_addr = *addr;
again:
		if (vm_map_findspace(map, min_addr, length, addr) ||
		    (max_addr != 0 && *addr + length > max_addr)) {
			rv = KERN_NO_SPACE;
			goto done;
		}
		if (find_space != VMFS_ANY_SPACE &&
		    (rv = vm_map_alignspace(map, object, offset, addr, length,
		    max_addr, alignment)) != KERN_SUCCESS) {
			if (find_space == VMFS_OPTIMAL_SPACE) {
				find_space = VMFS_ANY_SPACE;
				goto again;
			}
			goto done;
		}
	}
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
		    max, cow);
	} else {
		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
		    prot, max, cow);
	}
done:
	vm_map_unlock(map);
	return (rv);
}

/*
 *	vm_map_find_min() is a variant of vm_map_find() that takes an
 *	additional parameter (min_addr) and treats the given address
 *	(*addr) differently.  Specifically, it treats *addr as a hint
 *	and not as the minimum address where the mapping is created.
 *
 *	This function works in two phases.  First, it tries to
 *	allocate above the hint.  If that fails and the hint is
 *	greater than min_addr, it performs a second pass, replacing
 *	the hint with min_addr as the minimum address for the
 *	allocation.
 */
int
vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
    vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
    int cow)
{
	vm_offset_t hint;
	int rv;

	hint = *addr;
	for (;;) {
		rv = vm_map_find(map, object, offset, addr, length, max_addr,
		    find_space, prot, max, cow);
		if (rv == KERN_SUCCESS || min_addr >= hint)
			return (rv);
		*addr = hint = min_addr;
	}
}

static bool
vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
{
	vm_size_t prevsize;

	prevsize = prev->end - prev->start;
	return (prev->end == entry->start &&
	    prev->object.vm_object == entry->object.vm_object &&
	    (prev->object.vm_object == NULL ||
	    prev->offset + prevsize == entry->offset) &&
	    prev->eflags == entry->eflags &&
	    prev->protection == entry->protection &&
	    prev->max_protection == entry->max_protection &&
	    prev->inheritance == entry->inheritance &&
	    prev->wired_count == entry->wired_count &&
	    prev->cred == entry->cred);
}

static void
vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * If the backing object is a vnode object,
	 * vm_object_deallocate() calls vrele().
	 * However, vrele() does not lock the vnode
	 * because the vnode has additional
	 * references.  Thus, the map lock can be kept
	 * without causing a lock-order reversal with
	 * the vnode lock.
	 *
	 * Since we count the number of virtual page
	 * mappings in object->un_pager.vnp.writemappings,
	 * the writemappings value should not be adjusted
	 * when the entry is disposed of.
	 */
	if (entry->object.vm_object != NULL)
		vm_object_deallocate(entry->object.vm_object);
	if (entry->cred != NULL)
		crfree(entry->cred);
	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;

	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
		return;

	prev = entry->prev;
	if (prev != &map->header &&
	    vm_map_mergeable_neighbors(prev, entry)) {
		vm_map_entry_unlink(map, prev);
		entry->start = prev->start;
		entry->offset = prev->offset;
		if (entry->prev != &map->header)
			vm_map_entry_resize_free(map, entry->prev);
		vm_map_merged_neighbor_dispose(map, prev);
	}

	next = entry->next;
	if (next != &map->header &&
	    vm_map_mergeable_neighbors(entry, next)) {
		vm_map_entry_unlink(map, next);
		entry->end = next->end;
		vm_map_entry_resize_free(map, entry);
		vm_map_merged_neighbor_dispose(map, next);
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->end > start && entry->start < start,
	    ("_vm_map_clip_start: invalid clip of entry %p", entry));

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map &&
	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define	vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->start < end && entry->end > end,
	    ("_vm_map_clip_end: invalid clip of entry %p", entry));

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map &&
	    (entry->eflags & MAP_ENTRY_GUARD) == 0) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *	vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
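 *
 *	Illustrative sequence (bounds are hypothetical): the range is first
 *	created with vm_map_find() and then marked as a submap:
 *
 *		if (vm_map_find(map, NULL, 0, &start, size, 0, VMFS_ANY_SPACE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0) == KERN_SUCCESS)
 *			(void)vm_map_submap(map, start, start + size, submap);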
1909 */ 1910 int 1911 vm_map_submap( 1912 vm_map_t map, 1913 vm_offset_t start, 1914 vm_offset_t end, 1915 vm_map_t submap) 1916 { 1917 vm_map_entry_t entry; 1918 int result = KERN_INVALID_ARGUMENT; 1919 1920 vm_map_lock(map); 1921 1922 VM_MAP_RANGE_CHECK(map, start, end); 1923 1924 if (vm_map_lookup_entry(map, start, &entry)) { 1925 vm_map_clip_start(map, entry, start); 1926 } else 1927 entry = entry->next; 1928 1929 vm_map_clip_end(map, entry, end); 1930 1931 if ((entry->start == start) && (entry->end == end) && 1932 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1933 (entry->object.vm_object == NULL)) { 1934 entry->object.sub_map = submap; 1935 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1936 result = KERN_SUCCESS; 1937 } 1938 vm_map_unlock(map); 1939 1940 return (result); 1941 } 1942 1943 /* 1944 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 1945 */ 1946 #define MAX_INIT_PT 96 1947 1948 /* 1949 * vm_map_pmap_enter: 1950 * 1951 * Preload the specified map's pmap with mappings to the specified 1952 * object's memory-resident pages. No further physical pages are 1953 * allocated, and no further virtual pages are retrieved from secondary 1954 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 1955 * limited number of page mappings are created at the low-end of the 1956 * specified address range. (For this purpose, a superpage mapping 1957 * counts as one page mapping.) Otherwise, all resident pages within 1958 * the specified address range are mapped. 1959 */ 1960 static void 1961 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1962 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1963 { 1964 vm_offset_t start; 1965 vm_page_t p, p_start; 1966 vm_pindex_t mask, psize, threshold, tmpidx; 1967 1968 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1969 return; 1970 VM_OBJECT_RLOCK(object); 1971 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1972 VM_OBJECT_RUNLOCK(object); 1973 VM_OBJECT_WLOCK(object); 1974 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 1975 pmap_object_init_pt(map->pmap, addr, object, pindex, 1976 size); 1977 VM_OBJECT_WUNLOCK(object); 1978 return; 1979 } 1980 VM_OBJECT_LOCK_DOWNGRADE(object); 1981 } 1982 1983 psize = atop(size); 1984 if (psize + pindex > object->size) { 1985 if (object->size < pindex) { 1986 VM_OBJECT_RUNLOCK(object); 1987 return; 1988 } 1989 psize = object->size - pindex; 1990 } 1991 1992 start = 0; 1993 p_start = NULL; 1994 threshold = MAX_INIT_PT; 1995 1996 p = vm_page_find_least(object, pindex); 1997 /* 1998 * Assert: the variable p is either (1) the page with the 1999 * least pindex greater than or equal to the parameter pindex 2000 * or (2) NULL. 2001 */ 2002 for (; 2003 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2004 p = TAILQ_NEXT(p, listq)) { 2005 /* 2006 * don't allow an madvise to blow away our really 2007 * free pages allocating pv entries. 2008 */ 2009 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2010 vm_page_count_severe()) || 2011 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2012 tmpidx >= threshold)) { 2013 psize = tmpidx; 2014 break; 2015 } 2016 if (p->valid == VM_PAGE_BITS_ALL) { 2017 if (p_start == NULL) { 2018 start = addr + ptoa(tmpidx); 2019 p_start = p; 2020 } 2021 /* Jump ahead if a superpage mapping is possible. 
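 * For example, with 4KB base pages and a 2MB superpage (p->psind == 1),
 * mask is 511, so the scan skips the 512 base pages backing the
 * superpage in one step, and the MAP_PREFAULT_PARTIAL threshold is
 * advanced by the same 511 entries because the whole superpage counts
 * as a single page mapping.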
*/ 2022 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2023 (pagesizes[p->psind] - 1)) == 0) { 2024 mask = atop(pagesizes[p->psind]) - 1; 2025 if (tmpidx + mask < psize && 2026 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2027 p += mask; 2028 threshold += mask; 2029 } 2030 } 2031 } else if (p_start != NULL) { 2032 pmap_enter_object(map->pmap, start, addr + 2033 ptoa(tmpidx), p_start, prot); 2034 p_start = NULL; 2035 } 2036 } 2037 if (p_start != NULL) 2038 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2039 p_start, prot); 2040 VM_OBJECT_RUNLOCK(object); 2041 } 2042 2043 /* 2044 * vm_map_protect: 2045 * 2046 * Sets the protection of the specified address 2047 * region in the target map. If "set_max" is 2048 * specified, the maximum protection is to be set; 2049 * otherwise, only the current protection is affected. 2050 */ 2051 int 2052 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2053 vm_prot_t new_prot, boolean_t set_max) 2054 { 2055 vm_map_entry_t current, entry; 2056 vm_object_t obj; 2057 struct ucred *cred; 2058 vm_prot_t old_prot; 2059 2060 if (start == end) 2061 return (KERN_SUCCESS); 2062 2063 vm_map_lock(map); 2064 2065 /* 2066 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2067 * need to fault pages into the map and will drop the map lock while 2068 * doing so, and the VM object may end up in an inconsistent state if we 2069 * update the protection on the map entry in between faults. 2070 */ 2071 vm_map_wait_busy(map); 2072 2073 VM_MAP_RANGE_CHECK(map, start, end); 2074 2075 if (vm_map_lookup_entry(map, start, &entry)) { 2076 vm_map_clip_start(map, entry, start); 2077 } else { 2078 entry = entry->next; 2079 } 2080 2081 /* 2082 * Make a first pass to check for protection violations. 2083 */ 2084 for (current = entry; current->start < end; current = current->next) { 2085 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2086 continue; 2087 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2088 vm_map_unlock(map); 2089 return (KERN_INVALID_ARGUMENT); 2090 } 2091 if ((new_prot & current->max_protection) != new_prot) { 2092 vm_map_unlock(map); 2093 return (KERN_PROTECTION_FAILURE); 2094 } 2095 } 2096 2097 /* 2098 * Do an accounting pass for private read-only mappings that 2099 * now will do cow due to allowed write (e.g. debugger sets 2100 * breakpoint on text segment) 2101 */ 2102 for (current = entry; current->start < end; current = current->next) { 2103 2104 vm_map_clip_end(map, current, end); 2105 2106 if (set_max || 2107 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2108 ENTRY_CHARGED(current) || 2109 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2110 continue; 2111 } 2112 2113 cred = curthread->td_ucred; 2114 obj = current->object.vm_object; 2115 2116 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2117 if (!swap_reserve(current->end - current->start)) { 2118 vm_map_unlock(map); 2119 return (KERN_RESOURCE_SHORTAGE); 2120 } 2121 crhold(cred); 2122 current->cred = cred; 2123 continue; 2124 } 2125 2126 VM_OBJECT_WLOCK(obj); 2127 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2128 VM_OBJECT_WUNLOCK(obj); 2129 continue; 2130 } 2131 2132 /* 2133 * Charge for the whole object allocation now, since 2134 * we cannot distinguish between non-charged and 2135 * charged clipped mapping of the same object later. 
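 * For example, if a 10 page object is mapped by two clipped entries and
 * only one of them is made writable here, a later upgrade of the other
 * entry could not tell whether its share of the object was already
 * reserved; charging ptoa(obj->size) up front removes that ambiguity.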
*/ 2137 KASSERT(obj->charge == 0, 2138 ("vm_map_protect: object %p overcharged (entry %p)", 2139 obj, current)); 2140 if (!swap_reserve(ptoa(obj->size))) { 2141 VM_OBJECT_WUNLOCK(obj); 2142 vm_map_unlock(map); 2143 return (KERN_RESOURCE_SHORTAGE); 2144 } 2145 2146 crhold(cred); 2147 obj->cred = cred; 2148 obj->charge = ptoa(obj->size); 2149 VM_OBJECT_WUNLOCK(obj); 2150 } 2151 2152 /* 2153 * Go back and fix up protections. [Note that clipping is not 2154 * necessary the second time.] 2155 */ 2156 for (current = entry; current->start < end; current = current->next) { 2157 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2158 continue; 2159 2160 old_prot = current->protection; 2161 2162 if (set_max) 2163 current->protection = 2164 (current->max_protection = new_prot) & 2165 old_prot; 2166 else 2167 current->protection = new_prot; 2168 2169 /* 2170 * For user wired map entries, the normal lazy evaluation of 2171 * write access upgrades through soft page faults is 2172 * undesirable. Instead, immediately copy any pages that are 2173 * copy-on-write and enable write access in the physical map. 2174 */ 2175 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2176 (current->protection & VM_PROT_WRITE) != 0 && 2177 (old_prot & VM_PROT_WRITE) == 0) 2178 vm_fault_copy_entry(map, map, current, current, NULL); 2179 2180 /* 2181 * When restricting access, update the physical map. Worry 2182 * about copy-on-write here. 2183 */ 2184 if ((old_prot & ~current->protection) != 0) { 2185 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2186 VM_PROT_ALL) 2187 pmap_protect(map->pmap, current->start, 2188 current->end, 2189 current->protection & MASK(current)); 2190 #undef MASK 2191 } 2192 vm_map_simplify_entry(map, current); 2193 } 2194 vm_map_unlock(map); 2195 return (KERN_SUCCESS); 2196 } 2197 2198 /* 2199 * vm_map_madvise: 2200 * 2201 * This routine traverses a process's map handling the madvise 2202 * system call. Advisories are classified as either those affecting 2203 * the vm_map_entry structure, or those affecting the underlying 2204 * objects. 2205 */ 2206 int 2207 vm_map_madvise( 2208 vm_map_t map, 2209 vm_offset_t start, 2210 vm_offset_t end, 2211 int behav) 2212 { 2213 vm_map_entry_t current, entry; 2214 bool modify_map; 2215 2216 /* 2217 * Some madvise calls directly modify the vm_map_entry, in which case 2218 * we need to use an exclusive lock on the map and we need to perform 2219 * various clipping operations. Otherwise we only need a read-lock 2220 * on the map. 2221 */ 2222 switch(behav) { 2223 case MADV_NORMAL: 2224 case MADV_SEQUENTIAL: 2225 case MADV_RANDOM: 2226 case MADV_NOSYNC: 2227 case MADV_AUTOSYNC: 2228 case MADV_NOCORE: 2229 case MADV_CORE: 2230 if (start == end) 2231 return (0); 2232 modify_map = true; 2233 vm_map_lock(map); 2234 break; 2235 case MADV_WILLNEED: 2236 case MADV_DONTNEED: 2237 case MADV_FREE: 2238 if (start == end) 2239 return (0); 2240 modify_map = false; 2241 vm_map_lock_read(map); 2242 break; 2243 default: 2244 return (EINVAL); 2245 } 2246 2247 /* 2248 * Locate starting entry and clip if necessary. 2249 */ 2250 VM_MAP_RANGE_CHECK(map, start, end); 2251 2252 if (vm_map_lookup_entry(map, start, &entry)) { 2253 if (modify_map) 2254 vm_map_clip_start(map, entry, start); 2255 } else { 2256 entry = entry->next; 2257 } 2258 2259 if (modify_map) { 2260 /* 2261 * madvise behaviors that are implemented in the vm_map_entry. 2262 * 2263 * We clip the vm_map_entry so that behavioral changes are 2264 * limited to the specified address range.
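 * For example, madvise(MADV_NOSYNC) applied to the middle of a large
 * anonymous mapping first splits the entry at both ends of the range,
 * so MAP_ENTRY_NOSYNC is set only on the piece that was actually named.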
2265 */ 2266 for (current = entry; current->start < end; 2267 current = current->next) { 2268 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2269 continue; 2270 2271 vm_map_clip_end(map, current, end); 2272 2273 switch (behav) { 2274 case MADV_NORMAL: 2275 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2276 break; 2277 case MADV_SEQUENTIAL: 2278 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2279 break; 2280 case MADV_RANDOM: 2281 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2282 break; 2283 case MADV_NOSYNC: 2284 current->eflags |= MAP_ENTRY_NOSYNC; 2285 break; 2286 case MADV_AUTOSYNC: 2287 current->eflags &= ~MAP_ENTRY_NOSYNC; 2288 break; 2289 case MADV_NOCORE: 2290 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2291 break; 2292 case MADV_CORE: 2293 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2294 break; 2295 default: 2296 break; 2297 } 2298 vm_map_simplify_entry(map, current); 2299 } 2300 vm_map_unlock(map); 2301 } else { 2302 vm_pindex_t pstart, pend; 2303 2304 /* 2305 * madvise behaviors that are implemented in the underlying 2306 * vm_object. 2307 * 2308 * Since we don't clip the vm_map_entry, we have to clip 2309 * the vm_object pindex and count. 2310 */ 2311 for (current = entry; current->start < end; 2312 current = current->next) { 2313 vm_offset_t useEnd, useStart; 2314 2315 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2316 continue; 2317 2318 pstart = OFF_TO_IDX(current->offset); 2319 pend = pstart + atop(current->end - current->start); 2320 useStart = current->start; 2321 useEnd = current->end; 2322 2323 if (current->start < start) { 2324 pstart += atop(start - current->start); 2325 useStart = start; 2326 } 2327 if (current->end > end) { 2328 pend -= atop(current->end - end); 2329 useEnd = end; 2330 } 2331 2332 if (pstart >= pend) 2333 continue; 2334 2335 /* 2336 * Perform the pmap_advise() before clearing 2337 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2338 * concurrent pmap operation, such as pmap_remove(), 2339 * could clear a reference in the pmap and set 2340 * PGA_REFERENCED on the page before the pmap_advise() 2341 * had completed. Consequently, the page would appear 2342 * referenced based upon an old reference that 2343 * occurred before this pmap_advise() ran. 2344 */ 2345 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2346 pmap_advise(map->pmap, useStart, useEnd, 2347 behav); 2348 2349 vm_object_madvise(current->object.vm_object, pstart, 2350 pend, behav); 2351 2352 /* 2353 * Pre-populate paging structures in the 2354 * WILLNEED case. For wired entries, the 2355 * paging structures are already populated. 2356 */ 2357 if (behav == MADV_WILLNEED && 2358 current->wired_count == 0) { 2359 vm_map_pmap_enter(map, 2360 useStart, 2361 current->protection, 2362 current->object.vm_object, 2363 pstart, 2364 ptoa(pend - pstart), 2365 MAP_PREFAULT_MADVISE 2366 ); 2367 } 2368 } 2369 vm_map_unlock_read(map); 2370 } 2371 return (0); 2372 } 2373 2374 2375 /* 2376 * vm_map_inherit: 2377 * 2378 * Sets the inheritance of the specified address 2379 * range in the target map. Inheritance 2380 * affects how the map will be shared with 2381 * child maps at the time of vmspace_fork. 
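 * For example, minherit(2) with VM_INHERIT_NONE on a scratch region
 * keeps that range out of any child created later by fork(2): entries
 * marked VM_INHERIT_NONE are simply skipped by vmspace_fork() below.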
2382 */ 2383 int 2384 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2385 vm_inherit_t new_inheritance) 2386 { 2387 vm_map_entry_t entry; 2388 vm_map_entry_t temp_entry; 2389 2390 switch (new_inheritance) { 2391 case VM_INHERIT_NONE: 2392 case VM_INHERIT_COPY: 2393 case VM_INHERIT_SHARE: 2394 case VM_INHERIT_ZERO: 2395 break; 2396 default: 2397 return (KERN_INVALID_ARGUMENT); 2398 } 2399 if (start == end) 2400 return (KERN_SUCCESS); 2401 vm_map_lock(map); 2402 VM_MAP_RANGE_CHECK(map, start, end); 2403 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2404 entry = temp_entry; 2405 vm_map_clip_start(map, entry, start); 2406 } else 2407 entry = temp_entry->next; 2408 while (entry->start < end) { 2409 vm_map_clip_end(map, entry, end); 2410 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2411 new_inheritance != VM_INHERIT_ZERO) 2412 entry->inheritance = new_inheritance; 2413 vm_map_simplify_entry(map, entry); 2414 entry = entry->next; 2415 } 2416 vm_map_unlock(map); 2417 return (KERN_SUCCESS); 2418 } 2419 2420 /* 2421 * vm_map_unwire: 2422 * 2423 * Implements both kernel and user unwiring. 2424 */ 2425 int 2426 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2427 int flags) 2428 { 2429 vm_map_entry_t entry, first_entry, tmp_entry; 2430 vm_offset_t saved_start; 2431 unsigned int last_timestamp; 2432 int rv; 2433 boolean_t need_wakeup, result, user_unwire; 2434 2435 if (start == end) 2436 return (KERN_SUCCESS); 2437 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2438 vm_map_lock(map); 2439 VM_MAP_RANGE_CHECK(map, start, end); 2440 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2441 if (flags & VM_MAP_WIRE_HOLESOK) 2442 first_entry = first_entry->next; 2443 else { 2444 vm_map_unlock(map); 2445 return (KERN_INVALID_ADDRESS); 2446 } 2447 } 2448 last_timestamp = map->timestamp; 2449 entry = first_entry; 2450 while (entry->start < end) { 2451 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2452 /* 2453 * We have not yet clipped the entry. 2454 */ 2455 saved_start = (start >= entry->start) ? start : 2456 entry->start; 2457 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2458 if (vm_map_unlock_and_wait(map, 0)) { 2459 /* 2460 * Allow interruption of user unwiring? 2461 */ 2462 } 2463 vm_map_lock(map); 2464 if (last_timestamp+1 != map->timestamp) { 2465 /* 2466 * Look again for the entry because the map was 2467 * modified while it was unlocked. 2468 * Specifically, the entry may have been 2469 * clipped, merged, or deleted. 2470 */ 2471 if (!vm_map_lookup_entry(map, saved_start, 2472 &tmp_entry)) { 2473 if (flags & VM_MAP_WIRE_HOLESOK) 2474 tmp_entry = tmp_entry->next; 2475 else { 2476 if (saved_start == start) { 2477 /* 2478 * First_entry has been deleted. 2479 */ 2480 vm_map_unlock(map); 2481 return (KERN_INVALID_ADDRESS); 2482 } 2483 end = saved_start; 2484 rv = KERN_INVALID_ADDRESS; 2485 goto done; 2486 } 2487 } 2488 if (entry == first_entry) 2489 first_entry = tmp_entry; 2490 else 2491 first_entry = NULL; 2492 entry = tmp_entry; 2493 } 2494 last_timestamp = map->timestamp; 2495 continue; 2496 } 2497 vm_map_clip_start(map, entry, start); 2498 vm_map_clip_end(map, entry, end); 2499 /* 2500 * Mark the entry in case the map lock is released. (See 2501 * above.) 2502 */ 2503 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2504 entry->wiring_thread == NULL, 2505 ("owned map entry %p", entry)); 2506 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2507 entry->wiring_thread = curthread; 2508 /* 2509 * Check the map for holes in the specified region. 
2510 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2511 */ 2512 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2513 (entry->end < end && entry->next->start > entry->end)) { 2514 end = entry->end; 2515 rv = KERN_INVALID_ADDRESS; 2516 goto done; 2517 } 2518 /* 2519 * If system unwiring, require that the entry is system wired. 2520 */ 2521 if (!user_unwire && 2522 vm_map_entry_system_wired_count(entry) == 0) { 2523 end = entry->end; 2524 rv = KERN_INVALID_ARGUMENT; 2525 goto done; 2526 } 2527 entry = entry->next; 2528 } 2529 rv = KERN_SUCCESS; 2530 done: 2531 need_wakeup = FALSE; 2532 if (first_entry == NULL) { 2533 result = vm_map_lookup_entry(map, start, &first_entry); 2534 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2535 first_entry = first_entry->next; 2536 else 2537 KASSERT(result, ("vm_map_unwire: lookup failed")); 2538 } 2539 for (entry = first_entry; entry->start < end; entry = entry->next) { 2540 /* 2541 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2542 * space in the unwired region could have been mapped 2543 * while the map lock was dropped for draining 2544 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2545 * could be simultaneously wiring this new mapping 2546 * entry. Detect these cases and skip any entries 2547 * marked as in transition by us. 2548 */ 2549 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2550 entry->wiring_thread != curthread) { 2551 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2552 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2553 continue; 2554 } 2555 2556 if (rv == KERN_SUCCESS && (!user_unwire || 2557 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2558 if (user_unwire) 2559 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2560 if (entry->wired_count == 1) 2561 vm_map_entry_unwire(map, entry); 2562 else 2563 entry->wired_count--; 2564 } 2565 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2566 ("vm_map_unwire: in-transition flag missing %p", entry)); 2567 KASSERT(entry->wiring_thread == curthread, 2568 ("vm_map_unwire: alien wire %p", entry)); 2569 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2570 entry->wiring_thread = NULL; 2571 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2572 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2573 need_wakeup = TRUE; 2574 } 2575 vm_map_simplify_entry(map, entry); 2576 } 2577 vm_map_unlock(map); 2578 if (need_wakeup) 2579 vm_map_wakeup(map); 2580 return (rv); 2581 } 2582 2583 /* 2584 * vm_map_wire_entry_failure: 2585 * 2586 * Handle a wiring failure on the given entry. 2587 * 2588 * The map should be locked. 2589 */ 2590 static void 2591 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 2592 vm_offset_t failed_addr) 2593 { 2594 2595 VM_MAP_ASSERT_LOCKED(map); 2596 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 2597 entry->wired_count == 1, 2598 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 2599 KASSERT(failed_addr < entry->end, 2600 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 2601 2602 /* 2603 * If any pages at the start of this entry were successfully wired, 2604 * then unwire them. 2605 */ 2606 if (failed_addr > entry->start) { 2607 pmap_unwire(map->pmap, entry->start, failed_addr); 2608 vm_object_unwire(entry->object.vm_object, entry->offset, 2609 failed_addr - entry->start, PQ_ACTIVE); 2610 } 2611 2612 /* 2613 * Assign an out-of-range value to represent the failure to wire this 2614 * entry. 2615 */ 2616 entry->wired_count = -1; 2617 } 2618 2619 /* 2620 * vm_map_wire: 2621 * 2622 * Implements both kernel and user wiring. 
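 * For instance, mlock(2) reaches this function with VM_MAP_WIRE_USER |
 * VM_MAP_WIRE_NOHOLES, whereas a caller wiring an entire address space
 * (e.g. mlockall(2)) passes VM_MAP_WIRE_HOLESOK because the range may
 * legitimately contain unmapped gaps.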
2623 */ 2624 int 2625 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2626 int flags) 2627 { 2628 vm_map_entry_t entry, first_entry, tmp_entry; 2629 vm_offset_t faddr, saved_end, saved_start; 2630 unsigned int last_timestamp; 2631 int rv; 2632 boolean_t need_wakeup, result, user_wire; 2633 vm_prot_t prot; 2634 2635 if (start == end) 2636 return (KERN_SUCCESS); 2637 prot = 0; 2638 if (flags & VM_MAP_WIRE_WRITE) 2639 prot |= VM_PROT_WRITE; 2640 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 2641 vm_map_lock(map); 2642 VM_MAP_RANGE_CHECK(map, start, end); 2643 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2644 if (flags & VM_MAP_WIRE_HOLESOK) 2645 first_entry = first_entry->next; 2646 else { 2647 vm_map_unlock(map); 2648 return (KERN_INVALID_ADDRESS); 2649 } 2650 } 2651 last_timestamp = map->timestamp; 2652 entry = first_entry; 2653 while (entry->start < end) { 2654 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2655 /* 2656 * We have not yet clipped the entry. 2657 */ 2658 saved_start = (start >= entry->start) ? start : 2659 entry->start; 2660 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2661 if (vm_map_unlock_and_wait(map, 0)) { 2662 /* 2663 * Allow interruption of user wiring? 2664 */ 2665 } 2666 vm_map_lock(map); 2667 if (last_timestamp + 1 != map->timestamp) { 2668 /* 2669 * Look again for the entry because the map was 2670 * modified while it was unlocked. 2671 * Specifically, the entry may have been 2672 * clipped, merged, or deleted. 2673 */ 2674 if (!vm_map_lookup_entry(map, saved_start, 2675 &tmp_entry)) { 2676 if (flags & VM_MAP_WIRE_HOLESOK) 2677 tmp_entry = tmp_entry->next; 2678 else { 2679 if (saved_start == start) { 2680 /* 2681 * first_entry has been deleted. 2682 */ 2683 vm_map_unlock(map); 2684 return (KERN_INVALID_ADDRESS); 2685 } 2686 end = saved_start; 2687 rv = KERN_INVALID_ADDRESS; 2688 goto done; 2689 } 2690 } 2691 if (entry == first_entry) 2692 first_entry = tmp_entry; 2693 else 2694 first_entry = NULL; 2695 entry = tmp_entry; 2696 } 2697 last_timestamp = map->timestamp; 2698 continue; 2699 } 2700 vm_map_clip_start(map, entry, start); 2701 vm_map_clip_end(map, entry, end); 2702 /* 2703 * Mark the entry in case the map lock is released. (See 2704 * above.) 2705 */ 2706 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2707 entry->wiring_thread == NULL, 2708 ("owned map entry %p", entry)); 2709 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2710 entry->wiring_thread = curthread; 2711 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 2712 || (entry->protection & prot) != prot) { 2713 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 2714 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { 2715 end = entry->end; 2716 rv = KERN_INVALID_ADDRESS; 2717 goto done; 2718 } 2719 goto next_entry; 2720 } 2721 if (entry->wired_count == 0) { 2722 entry->wired_count++; 2723 saved_start = entry->start; 2724 saved_end = entry->end; 2725 2726 /* 2727 * Release the map lock, relying on the in-transition 2728 * mark. Mark the map busy for fork. 2729 */ 2730 vm_map_busy(map); 2731 vm_map_unlock(map); 2732 2733 faddr = saved_start; 2734 do { 2735 /* 2736 * Simulate a fault to get the page and enter 2737 * it into the physical map. 
2738 */ 2739 if ((rv = vm_fault(map, faddr, VM_PROT_NONE, 2740 VM_FAULT_WIRE)) != KERN_SUCCESS) 2741 break; 2742 } while ((faddr += PAGE_SIZE) < saved_end); 2743 vm_map_lock(map); 2744 vm_map_unbusy(map); 2745 if (last_timestamp + 1 != map->timestamp) { 2746 /* 2747 * Look again for the entry because the map was 2748 * modified while it was unlocked. The entry 2749 * may have been clipped, but NOT merged or 2750 * deleted. 2751 */ 2752 result = vm_map_lookup_entry(map, saved_start, 2753 &tmp_entry); 2754 KASSERT(result, ("vm_map_wire: lookup failed")); 2755 if (entry == first_entry) 2756 first_entry = tmp_entry; 2757 else 2758 first_entry = NULL; 2759 entry = tmp_entry; 2760 while (entry->end < saved_end) { 2761 /* 2762 * In case of failure, handle entries 2763 * that were not fully wired here; 2764 * fully wired entries are handled 2765 * later. 2766 */ 2767 if (rv != KERN_SUCCESS && 2768 faddr < entry->end) 2769 vm_map_wire_entry_failure(map, 2770 entry, faddr); 2771 entry = entry->next; 2772 } 2773 } 2774 last_timestamp = map->timestamp; 2775 if (rv != KERN_SUCCESS) { 2776 vm_map_wire_entry_failure(map, entry, faddr); 2777 end = entry->end; 2778 goto done; 2779 } 2780 } else if (!user_wire || 2781 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2782 entry->wired_count++; 2783 } 2784 /* 2785 * Check the map for holes in the specified region. 2786 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 2787 */ 2788 next_entry: 2789 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 && 2790 entry->end < end && entry->next->start > entry->end) { 2791 end = entry->end; 2792 rv = KERN_INVALID_ADDRESS; 2793 goto done; 2794 } 2795 entry = entry->next; 2796 } 2797 rv = KERN_SUCCESS; 2798 done: 2799 need_wakeup = FALSE; 2800 if (first_entry == NULL) { 2801 result = vm_map_lookup_entry(map, start, &first_entry); 2802 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2803 first_entry = first_entry->next; 2804 else 2805 KASSERT(result, ("vm_map_wire: lookup failed")); 2806 } 2807 for (entry = first_entry; entry->start < end; entry = entry->next) { 2808 /* 2809 * If VM_MAP_WIRE_HOLESOK was specified, an empty 2810 * space in the unwired region could have been mapped 2811 * while the map lock was dropped for faulting in the 2812 * pages or draining MAP_ENTRY_IN_TRANSITION. 2813 * Moreover, another thread could be simultaneously 2814 * wiring this new mapping entry. Detect these cases 2815 * and skip any entries marked as in transition not by us. 2816 */ 2817 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2818 entry->wiring_thread != curthread) { 2819 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0, 2820 ("vm_map_wire: !HOLESOK and new/changed entry")); 2821 continue; 2822 } 2823 2824 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) 2825 goto next_entry_done; 2826 2827 if (rv == KERN_SUCCESS) { 2828 if (user_wire) 2829 entry->eflags |= MAP_ENTRY_USER_WIRED; 2830 } else if (entry->wired_count == -1) { 2831 /* 2832 * Wiring failed on this entry. Thus, unwiring is 2833 * unnecessary. 2834 */ 2835 entry->wired_count = 0; 2836 } else if (!user_wire || 2837 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2838 /* 2839 * Undo the wiring. Wiring succeeded on this entry 2840 * but failed on a later entry. 
2841 */ 2842 if (entry->wired_count == 1) 2843 vm_map_entry_unwire(map, entry); 2844 else 2845 entry->wired_count--; 2846 } 2847 next_entry_done: 2848 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2849 ("vm_map_wire: in-transition flag missing %p", entry)); 2850 KASSERT(entry->wiring_thread == curthread, 2851 ("vm_map_wire: alien wire %p", entry)); 2852 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 2853 MAP_ENTRY_WIRE_SKIPPED); 2854 entry->wiring_thread = NULL; 2855 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2856 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2857 need_wakeup = TRUE; 2858 } 2859 vm_map_simplify_entry(map, entry); 2860 } 2861 vm_map_unlock(map); 2862 if (need_wakeup) 2863 vm_map_wakeup(map); 2864 return (rv); 2865 } 2866 2867 /* 2868 * vm_map_sync 2869 * 2870 * Push any dirty cached pages in the address range to their pager. 2871 * If syncio is TRUE, dirty pages are written synchronously. 2872 * If invalidate is TRUE, any cached pages are freed as well. 2873 * 2874 * If the size of the region from start to end is zero, we are 2875 * supposed to flush all modified pages within the region containing 2876 * start. Unfortunately, a region can be split or coalesced with 2877 * neighboring regions, making it difficult to determine what the 2878 * original region was. Therefore, we approximate this requirement by 2879 * flushing the current region containing start. 2880 * 2881 * Returns an error if any part of the specified range is not mapped. 2882 */ 2883 int 2884 vm_map_sync( 2885 vm_map_t map, 2886 vm_offset_t start, 2887 vm_offset_t end, 2888 boolean_t syncio, 2889 boolean_t invalidate) 2890 { 2891 vm_map_entry_t current; 2892 vm_map_entry_t entry; 2893 vm_size_t size; 2894 vm_object_t object; 2895 vm_ooffset_t offset; 2896 unsigned int last_timestamp; 2897 boolean_t failed; 2898 2899 vm_map_lock_read(map); 2900 VM_MAP_RANGE_CHECK(map, start, end); 2901 if (!vm_map_lookup_entry(map, start, &entry)) { 2902 vm_map_unlock_read(map); 2903 return (KERN_INVALID_ADDRESS); 2904 } else if (start == end) { 2905 start = entry->start; 2906 end = entry->end; 2907 } 2908 /* 2909 * Make a first pass to check for user-wired memory and holes. 2910 */ 2911 for (current = entry; current->start < end; current = current->next) { 2912 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2913 vm_map_unlock_read(map); 2914 return (KERN_INVALID_ARGUMENT); 2915 } 2916 if (end > current->end && 2917 current->end != current->next->start) { 2918 vm_map_unlock_read(map); 2919 return (KERN_INVALID_ADDRESS); 2920 } 2921 } 2922 2923 if (invalidate) 2924 pmap_remove(map->pmap, start, end); 2925 failed = FALSE; 2926 2927 /* 2928 * Make a second pass, cleaning/uncaching pages from the indicated 2929 * objects as we go. 2930 */ 2931 for (current = entry; current->start < end;) { 2932 offset = current->offset + (start - current->start); 2933 size = (end <= current->end ? 
end : current->end) - start; 2934 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2935 vm_map_t smap; 2936 vm_map_entry_t tentry; 2937 vm_size_t tsize; 2938 2939 smap = current->object.sub_map; 2940 vm_map_lock_read(smap); 2941 (void) vm_map_lookup_entry(smap, offset, &tentry); 2942 tsize = tentry->end - offset; 2943 if (tsize < size) 2944 size = tsize; 2945 object = tentry->object.vm_object; 2946 offset = tentry->offset + (offset - tentry->start); 2947 vm_map_unlock_read(smap); 2948 } else { 2949 object = current->object.vm_object; 2950 } 2951 vm_object_reference(object); 2952 last_timestamp = map->timestamp; 2953 vm_map_unlock_read(map); 2954 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 2955 failed = TRUE; 2956 start += size; 2957 vm_object_deallocate(object); 2958 vm_map_lock_read(map); 2959 if (last_timestamp == map->timestamp || 2960 !vm_map_lookup_entry(map, start, &current)) 2961 current = current->next; 2962 } 2963 2964 vm_map_unlock_read(map); 2965 return (failed ? KERN_FAILURE : KERN_SUCCESS); 2966 } 2967 2968 /* 2969 * vm_map_entry_unwire: [ internal use only ] 2970 * 2971 * Make the region specified by this entry pageable. 2972 * 2973 * The map in question should be locked. 2974 * [This is the reason for this routine's existence.] 2975 */ 2976 static void 2977 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2978 { 2979 2980 VM_MAP_ASSERT_LOCKED(map); 2981 KASSERT(entry->wired_count > 0, 2982 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 2983 pmap_unwire(map->pmap, entry->start, entry->end); 2984 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end - 2985 entry->start, PQ_ACTIVE); 2986 entry->wired_count = 0; 2987 } 2988 2989 static void 2990 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 2991 { 2992 2993 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 2994 vm_object_deallocate(entry->object.vm_object); 2995 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 2996 } 2997 2998 /* 2999 * vm_map_entry_delete: [ internal use only ] 3000 * 3001 * Deallocate the given entry from the target map.
3002 */ 3003 static void 3004 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3005 { 3006 vm_object_t object; 3007 vm_pindex_t offidxstart, offidxend, count, size1; 3008 vm_size_t size; 3009 3010 vm_map_entry_unlink(map, entry); 3011 object = entry->object.vm_object; 3012 3013 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3014 MPASS(entry->cred == NULL); 3015 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3016 MPASS(object == NULL); 3017 vm_map_entry_deallocate(entry, map->system_map); 3018 return; 3019 } 3020 3021 size = entry->end - entry->start; 3022 map->size -= size; 3023 3024 if (entry->cred != NULL) { 3025 swap_release_by_cred(size, entry->cred); 3026 crfree(entry->cred); 3027 } 3028 3029 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 3030 (object != NULL)) { 3031 KASSERT(entry->cred == NULL || object->cred == NULL || 3032 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3033 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3034 count = atop(size); 3035 offidxstart = OFF_TO_IDX(entry->offset); 3036 offidxend = offidxstart + count; 3037 VM_OBJECT_WLOCK(object); 3038 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT | 3039 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 3040 object == kernel_object)) { 3041 vm_object_collapse(object); 3042 3043 /* 3044 * The option OBJPR_NOTMAPPED can be passed here 3045 * because vm_map_delete() already performed 3046 * pmap_remove() on the only mapping to this range 3047 * of pages. 3048 */ 3049 vm_object_page_remove(object, offidxstart, offidxend, 3050 OBJPR_NOTMAPPED); 3051 if (object->type == OBJT_SWAP) 3052 swap_pager_freespace(object, offidxstart, 3053 count); 3054 if (offidxend >= object->size && 3055 offidxstart < object->size) { 3056 size1 = object->size; 3057 object->size = offidxstart; 3058 if (object->cred != NULL) { 3059 size1 -= object->size; 3060 KASSERT(object->charge >= ptoa(size1), 3061 ("object %p charge < 0", object)); 3062 swap_release_by_cred(ptoa(size1), 3063 object->cred); 3064 object->charge -= ptoa(size1); 3065 } 3066 } 3067 } 3068 VM_OBJECT_WUNLOCK(object); 3069 } else 3070 entry->object.vm_object = NULL; 3071 if (map->system_map) 3072 vm_map_entry_deallocate(entry, TRUE); 3073 else { 3074 entry->next = curthread->td_map_def_user; 3075 curthread->td_map_def_user = entry; 3076 } 3077 } 3078 3079 /* 3080 * vm_map_delete: [ internal use only ] 3081 * 3082 * Deallocates the given address range from the target 3083 * map. 3084 */ 3085 int 3086 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3087 { 3088 vm_map_entry_t entry; 3089 vm_map_entry_t first_entry; 3090 3091 VM_MAP_ASSERT_LOCKED(map); 3092 if (start == end) 3093 return (KERN_SUCCESS); 3094 3095 /* 3096 * Find the start of the region, and clip it 3097 */ 3098 if (!vm_map_lookup_entry(map, start, &first_entry)) 3099 entry = first_entry->next; 3100 else { 3101 entry = first_entry; 3102 vm_map_clip_start(map, entry, start); 3103 } 3104 3105 /* 3106 * Step through all entries in this region 3107 */ 3108 while (entry->start < end) { 3109 vm_map_entry_t next; 3110 3111 /* 3112 * Wait for wiring or unwiring of an entry to complete. 3113 * Also wait for any system wirings to disappear on 3114 * user maps. 
3115 */ 3116 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3117 (vm_map_pmap(map) != kernel_pmap && 3118 vm_map_entry_system_wired_count(entry) != 0)) { 3119 unsigned int last_timestamp; 3120 vm_offset_t saved_start; 3121 vm_map_entry_t tmp_entry; 3122 3123 saved_start = entry->start; 3124 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3125 last_timestamp = map->timestamp; 3126 (void) vm_map_unlock_and_wait(map, 0); 3127 vm_map_lock(map); 3128 if (last_timestamp + 1 != map->timestamp) { 3129 /* 3130 * Look again for the entry because the map was 3131 * modified while it was unlocked. 3132 * Specifically, the entry may have been 3133 * clipped, merged, or deleted. 3134 */ 3135 if (!vm_map_lookup_entry(map, saved_start, 3136 &tmp_entry)) 3137 entry = tmp_entry->next; 3138 else { 3139 entry = tmp_entry; 3140 vm_map_clip_start(map, entry, 3141 saved_start); 3142 } 3143 } 3144 continue; 3145 } 3146 vm_map_clip_end(map, entry, end); 3147 3148 next = entry->next; 3149 3150 /* 3151 * Unwire before removing addresses from the pmap; otherwise, 3152 * unwiring will put the entries back in the pmap. 3153 */ 3154 if (entry->wired_count != 0) 3155 vm_map_entry_unwire(map, entry); 3156 3157 /* 3158 * Remove mappings for the pages, but only if the 3159 * mappings could exist. For instance, it does not 3160 * make sense to call pmap_remove() for guard entries. 3161 */ 3162 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 3163 entry->object.vm_object != NULL) 3164 pmap_remove(map->pmap, entry->start, entry->end); 3165 3166 /* 3167 * Delete the entry only after removing all pmap 3168 * entries pointing to its pages. (Otherwise, its 3169 * page frames may be reallocated, and any modify bits 3170 * will be set in the wrong object!) 3171 */ 3172 vm_map_entry_delete(map, entry); 3173 entry = next; 3174 } 3175 return (KERN_SUCCESS); 3176 } 3177 3178 /* 3179 * vm_map_remove: 3180 * 3181 * Remove the given address range from the target map. 3182 * This is the exported form of vm_map_delete. 3183 */ 3184 int 3185 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3186 { 3187 int result; 3188 3189 vm_map_lock(map); 3190 VM_MAP_RANGE_CHECK(map, start, end); 3191 result = vm_map_delete(map, start, end); 3192 vm_map_unlock(map); 3193 return (result); 3194 } 3195 3196 /* 3197 * vm_map_check_protection: 3198 * 3199 * Assert that the target map allows the specified privilege on the 3200 * entire address region given. The entire region must be allocated. 3201 * 3202 * WARNING! This code does not and should not check whether the 3203 * contents of the region is accessible. For example a smaller file 3204 * might be mapped into a larger address space. 3205 * 3206 * NOTE! This code is also called by munmap(). 3207 * 3208 * The map must be locked. A read lock is sufficient. 3209 */ 3210 boolean_t 3211 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3212 vm_prot_t protection) 3213 { 3214 vm_map_entry_t entry; 3215 vm_map_entry_t tmp_entry; 3216 3217 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3218 return (FALSE); 3219 entry = tmp_entry; 3220 3221 while (start < end) { 3222 /* 3223 * No holes allowed! 3224 */ 3225 if (start < entry->start) 3226 return (FALSE); 3227 /* 3228 * Check protection associated with entry. 
3229 */ 3230 if ((entry->protection & protection) != protection) 3231 return (FALSE); 3232 /* go to next entry */ 3233 start = entry->end; 3234 entry = entry->next; 3235 } 3236 return (TRUE); 3237 } 3238 3239 /* 3240 * vm_map_copy_entry: 3241 * 3242 * Copies the contents of the source entry to the destination 3243 * entry. The entries *must* be aligned properly. 3244 */ 3245 static void 3246 vm_map_copy_entry( 3247 vm_map_t src_map, 3248 vm_map_t dst_map, 3249 vm_map_entry_t src_entry, 3250 vm_map_entry_t dst_entry, 3251 vm_ooffset_t *fork_charge) 3252 { 3253 vm_object_t src_object; 3254 vm_map_entry_t fake_entry; 3255 vm_offset_t size; 3256 struct ucred *cred; 3257 int charged; 3258 3259 VM_MAP_ASSERT_LOCKED(dst_map); 3260 3261 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3262 return; 3263 3264 if (src_entry->wired_count == 0 || 3265 (src_entry->protection & VM_PROT_WRITE) == 0) { 3266 /* 3267 * If the source entry is marked needs_copy, it is already 3268 * write-protected. 3269 */ 3270 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3271 (src_entry->protection & VM_PROT_WRITE) != 0) { 3272 pmap_protect(src_map->pmap, 3273 src_entry->start, 3274 src_entry->end, 3275 src_entry->protection & ~VM_PROT_WRITE); 3276 } 3277 3278 /* 3279 * Make a copy of the object. 3280 */ 3281 size = src_entry->end - src_entry->start; 3282 if ((src_object = src_entry->object.vm_object) != NULL) { 3283 VM_OBJECT_WLOCK(src_object); 3284 charged = ENTRY_CHARGED(src_entry); 3285 if (src_object->handle == NULL && 3286 (src_object->type == OBJT_DEFAULT || 3287 src_object->type == OBJT_SWAP)) { 3288 vm_object_collapse(src_object); 3289 if ((src_object->flags & (OBJ_NOSPLIT | 3290 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 3291 vm_object_split(src_entry); 3292 src_object = 3293 src_entry->object.vm_object; 3294 } 3295 } 3296 vm_object_reference_locked(src_object); 3297 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3298 if (src_entry->cred != NULL && 3299 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3300 KASSERT(src_object->cred == NULL, 3301 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3302 src_object)); 3303 src_object->cred = src_entry->cred; 3304 src_object->charge = size; 3305 } 3306 VM_OBJECT_WUNLOCK(src_object); 3307 dst_entry->object.vm_object = src_object; 3308 if (charged) { 3309 cred = curthread->td_ucred; 3310 crhold(cred); 3311 dst_entry->cred = cred; 3312 *fork_charge += size; 3313 if (!(src_entry->eflags & 3314 MAP_ENTRY_NEEDS_COPY)) { 3315 crhold(cred); 3316 src_entry->cred = cred; 3317 *fork_charge += size; 3318 } 3319 } 3320 src_entry->eflags |= MAP_ENTRY_COW | 3321 MAP_ENTRY_NEEDS_COPY; 3322 dst_entry->eflags |= MAP_ENTRY_COW | 3323 MAP_ENTRY_NEEDS_COPY; 3324 dst_entry->offset = src_entry->offset; 3325 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3326 /* 3327 * MAP_ENTRY_VN_WRITECNT cannot 3328 * indicate write reference from 3329 * src_entry, since the entry is 3330 * marked as needs copy. Allocate a 3331 * fake entry that is used to 3332 * decrement object->un_pager.vnp.writecount 3333 * at the appropriate time. Attach 3334 * fake_entry to the deferred list. 
3335 */ 3336 fake_entry = vm_map_entry_create(dst_map); 3337 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; 3338 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; 3339 vm_object_reference(src_object); 3340 fake_entry->object.vm_object = src_object; 3341 fake_entry->start = src_entry->start; 3342 fake_entry->end = src_entry->end; 3343 fake_entry->next = curthread->td_map_def_user; 3344 curthread->td_map_def_user = fake_entry; 3345 } 3346 3347 pmap_copy(dst_map->pmap, src_map->pmap, 3348 dst_entry->start, dst_entry->end - dst_entry->start, 3349 src_entry->start); 3350 } else { 3351 dst_entry->object.vm_object = NULL; 3352 dst_entry->offset = 0; 3353 if (src_entry->cred != NULL) { 3354 dst_entry->cred = curthread->td_ucred; 3355 crhold(dst_entry->cred); 3356 *fork_charge += size; 3357 } 3358 } 3359 } else { 3360 /* 3361 * We don't want to make writeable wired pages copy-on-write. 3362 * Immediately copy these pages into the new map by simulating 3363 * page faults. The new pages are pageable. 3364 */ 3365 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3366 fork_charge); 3367 } 3368 } 3369 3370 /* 3371 * vmspace_map_entry_forked: 3372 * Update the newly-forked vmspace each time a map entry is inherited 3373 * or copied. The values for vm_dsize and vm_tsize are approximate 3374 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3375 */ 3376 static void 3377 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3378 vm_map_entry_t entry) 3379 { 3380 vm_size_t entrysize; 3381 vm_offset_t newend; 3382 3383 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3384 return; 3385 entrysize = entry->end - entry->start; 3386 vm2->vm_map.size += entrysize; 3387 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3388 vm2->vm_ssize += btoc(entrysize); 3389 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3390 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3391 newend = MIN(entry->end, 3392 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3393 vm2->vm_dsize += btoc(newend - entry->start); 3394 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3395 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3396 newend = MIN(entry->end, 3397 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3398 vm2->vm_tsize += btoc(newend - entry->start); 3399 } 3400 } 3401 3402 /* 3403 * vmspace_fork: 3404 * Create a new process vmspace structure and vm_map 3405 * based on those of an existing process. The new map 3406 * is based on the old map, according to the inheritance 3407 * values on the regions in that map. 3408 * 3409 * XXX It might be worth coalescing the entries added to the new vmspace. 3410 * 3411 * The source map must not be locked. 3412 */ 3413 struct vmspace * 3414 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3415 { 3416 struct vmspace *vm2; 3417 vm_map_t new_map, old_map; 3418 vm_map_entry_t new_entry, old_entry; 3419 vm_object_t object; 3420 int locked; 3421 vm_inherit_t inh; 3422 3423 old_map = &vm1->vm_map; 3424 /* Copy immutable fields of vm1 to vm2. 
*/ 3425 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), NULL); 3426 if (vm2 == NULL) 3427 return (NULL); 3428 vm2->vm_taddr = vm1->vm_taddr; 3429 vm2->vm_daddr = vm1->vm_daddr; 3430 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 3431 vm_map_lock(old_map); 3432 if (old_map->busy) 3433 vm_map_wait_busy(old_map); 3434 new_map = &vm2->vm_map; 3435 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ 3436 KASSERT(locked, ("vmspace_fork: lock failed")); 3437 3438 old_entry = old_map->header.next; 3439 3440 while (old_entry != &old_map->header) { 3441 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3442 panic("vm_map_fork: encountered a submap"); 3443 3444 inh = old_entry->inheritance; 3445 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && 3446 inh != VM_INHERIT_NONE) 3447 inh = VM_INHERIT_COPY; 3448 3449 switch (inh) { 3450 case VM_INHERIT_NONE: 3451 break; 3452 3453 case VM_INHERIT_SHARE: 3454 /* 3455 * Clone the entry, creating the shared object if necessary. 3456 */ 3457 object = old_entry->object.vm_object; 3458 if (object == NULL) { 3459 object = vm_object_allocate(OBJT_DEFAULT, 3460 atop(old_entry->end - old_entry->start)); 3461 old_entry->object.vm_object = object; 3462 old_entry->offset = 0; 3463 if (old_entry->cred != NULL) { 3464 object->cred = old_entry->cred; 3465 object->charge = old_entry->end - 3466 old_entry->start; 3467 old_entry->cred = NULL; 3468 } 3469 } 3470 3471 /* 3472 * Add the reference before calling vm_object_shadow 3473 * to insure that a shadow object is created. 3474 */ 3475 vm_object_reference(object); 3476 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3477 vm_object_shadow(&old_entry->object.vm_object, 3478 &old_entry->offset, 3479 old_entry->end - old_entry->start); 3480 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 3481 /* Transfer the second reference too. */ 3482 vm_object_reference( 3483 old_entry->object.vm_object); 3484 3485 /* 3486 * As in vm_map_simplify_entry(), the 3487 * vnode lock will not be acquired in 3488 * this call to vm_object_deallocate(). 3489 */ 3490 vm_object_deallocate(object); 3491 object = old_entry->object.vm_object; 3492 } 3493 VM_OBJECT_WLOCK(object); 3494 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3495 if (old_entry->cred != NULL) { 3496 KASSERT(object->cred == NULL, ("vmspace_fork both cred")); 3497 object->cred = old_entry->cred; 3498 object->charge = old_entry->end - old_entry->start; 3499 old_entry->cred = NULL; 3500 } 3501 3502 /* 3503 * Assert the correct state of the vnode 3504 * v_writecount while the object is locked, to 3505 * not relock it later for the assertion 3506 * correctness. 3507 */ 3508 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT && 3509 object->type == OBJT_VNODE) { 3510 KASSERT(((struct vnode *)object->handle)-> 3511 v_writecount > 0, 3512 ("vmspace_fork: v_writecount %p", object)); 3513 KASSERT(object->un_pager.vnp.writemappings > 0, 3514 ("vmspace_fork: vnp.writecount %p", 3515 object)); 3516 } 3517 VM_OBJECT_WUNLOCK(object); 3518 3519 /* 3520 * Clone the entry, referencing the shared object. 3521 */ 3522 new_entry = vm_map_entry_create(new_map); 3523 *new_entry = *old_entry; 3524 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3525 MAP_ENTRY_IN_TRANSITION); 3526 new_entry->wiring_thread = NULL; 3527 new_entry->wired_count = 0; 3528 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { 3529 vnode_pager_update_writecount(object, 3530 new_entry->start, new_entry->end); 3531 } 3532 3533 /* 3534 * Insert the entry into the new map -- we know we're 3535 * inserting at the end of the new map. 
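 * (Entries are visited in address order, so linking after
 * new_map->header.prev always appends at the tail of the new map.)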
3536 */ 3537 vm_map_entry_link(new_map, new_map->header.prev, 3538 new_entry); 3539 vmspace_map_entry_forked(vm1, vm2, new_entry); 3540 3541 /* 3542 * Update the physical map 3543 */ 3544 pmap_copy(new_map->pmap, old_map->pmap, 3545 new_entry->start, 3546 (old_entry->end - old_entry->start), 3547 old_entry->start); 3548 break; 3549 3550 case VM_INHERIT_COPY: 3551 /* 3552 * Clone the entry and link into the map. 3553 */ 3554 new_entry = vm_map_entry_create(new_map); 3555 *new_entry = *old_entry; 3556 /* 3557 * Copied entry is COW over the old object. 3558 */ 3559 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3560 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); 3561 new_entry->wiring_thread = NULL; 3562 new_entry->wired_count = 0; 3563 new_entry->object.vm_object = NULL; 3564 new_entry->cred = NULL; 3565 vm_map_entry_link(new_map, new_map->header.prev, 3566 new_entry); 3567 vmspace_map_entry_forked(vm1, vm2, new_entry); 3568 vm_map_copy_entry(old_map, new_map, old_entry, 3569 new_entry, fork_charge); 3570 break; 3571 3572 case VM_INHERIT_ZERO: 3573 /* 3574 * Create a new anonymous mapping entry modelled from 3575 * the old one. 3576 */ 3577 new_entry = vm_map_entry_create(new_map); 3578 memset(new_entry, 0, sizeof(*new_entry)); 3579 3580 new_entry->start = old_entry->start; 3581 new_entry->end = old_entry->end; 3582 new_entry->eflags = old_entry->eflags & 3583 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 3584 MAP_ENTRY_VN_WRITECNT); 3585 new_entry->protection = old_entry->protection; 3586 new_entry->max_protection = old_entry->max_protection; 3587 new_entry->inheritance = VM_INHERIT_ZERO; 3588 3589 vm_map_entry_link(new_map, new_map->header.prev, 3590 new_entry); 3591 vmspace_map_entry_forked(vm1, vm2, new_entry); 3592 3593 new_entry->cred = curthread->td_ucred; 3594 crhold(new_entry->cred); 3595 *fork_charge += (new_entry->end - new_entry->start); 3596 3597 break; 3598 } 3599 old_entry = old_entry->next; 3600 } 3601 /* 3602 * Use inlined vm_map_unlock() to postpone handling the deferred 3603 * map entries, which cannot be done until both old_map and 3604 * new_map locks are released. 3605 */ 3606 sx_xunlock(&old_map->lock); 3607 sx_xunlock(&new_map->lock); 3608 vm_map_process_deferred(); 3609 3610 return (vm2); 3611 } 3612 3613 /* 3614 * Create a process's stack for exec_new_vmspace(). This function is never 3615 * asked to wire the newly created stack. 3616 */ 3617 int 3618 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3619 vm_prot_t prot, vm_prot_t max, int cow) 3620 { 3621 vm_size_t growsize, init_ssize; 3622 rlim_t vmemlim; 3623 int rv; 3624 3625 MPASS((map->flags & MAP_WIREFUTURE) == 0); 3626 growsize = sgrowsiz; 3627 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 3628 vm_map_lock(map); 3629 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3630 /* If we would blow our VMEM resource limit, no go */ 3631 if (map->size + init_ssize > vmemlim) { 3632 rv = KERN_NO_SPACE; 3633 goto out; 3634 } 3635 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 3636 max, cow); 3637 out: 3638 vm_map_unlock(map); 3639 return (rv); 3640 } 3641 3642 static int stack_guard_page = 1; 3643 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 3644 &stack_guard_page, 0, 3645 "Specifies the number of guard pages for a stack that grows"); 3646 3647 static int 3648 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3649 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 3650 { 3651 vm_map_entry_t new_entry, prev_entry; 3652 vm_offset_t bot, gap_bot, gap_top, top; 3653 vm_size_t init_ssize, sgp; 3654 int orient, rv; 3655 3656 /* 3657 * The stack orientation is piggybacked with the cow argument. 3658 * Extract it into orient and mask the cow argument so that we 3659 * don't pass it around further. 3660 */ 3661 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 3662 KASSERT(orient != 0, ("No stack grow direction")); 3663 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 3664 ("bi-dir stack")); 3665 3666 if (addrbos < vm_map_min(map) || 3667 addrbos + max_ssize > vm_map_max(map) || 3668 addrbos + max_ssize <= addrbos) 3669 return (KERN_INVALID_ADDRESS); 3670 sgp = (vm_size_t)stack_guard_page * PAGE_SIZE; 3671 if (sgp >= max_ssize) 3672 return (KERN_INVALID_ARGUMENT); 3673 3674 init_ssize = growsize; 3675 if (max_ssize < init_ssize + sgp) 3676 init_ssize = max_ssize - sgp; 3677 3678 /* If addr is already mapped, no go */ 3679 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 3680 return (KERN_NO_SPACE); 3681 3682 /* 3683 * If we can't accommodate max_ssize in the current mapping, no go. 3684 */ 3685 if (prev_entry->next->start < addrbos + max_ssize) 3686 return (KERN_NO_SPACE); 3687 3688 /* 3689 * We initially map a stack of only init_ssize. We will grow as 3690 * needed later. Depending on the orientation of the stack (i.e. 3691 * the grow direction) we either map at the top of the range, the 3692 * bottom of the range or in the middle. 3693 * 3694 * Note: we would normally expect prot and max to be VM_PROT_ALL, 3695 * and cow to be 0. Possibly we should eliminate these as input 3696 * parameters, and just pass these values here in the insert call. 3697 */ 3698 if (orient == MAP_STACK_GROWS_DOWN) { 3699 bot = addrbos + max_ssize - init_ssize; 3700 top = bot + init_ssize; 3701 gap_bot = addrbos; 3702 gap_top = bot; 3703 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 3704 bot = addrbos; 3705 top = bot + init_ssize; 3706 gap_bot = top; 3707 gap_top = addrbos + max_ssize; 3708 } 3709 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 3710 if (rv != KERN_SUCCESS) 3711 return (rv); 3712 new_entry = prev_entry->next; 3713 KASSERT(new_entry->end == top || new_entry->start == bot, 3714 ("Bad entry start/end for new stack entry")); 3715 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 3716 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 3717 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 3718 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 3719 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 3720 ("new entry lacks MAP_ENTRY_GROWS_UP")); 3721 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 3722 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 
3723 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 3724 if (rv != KERN_SUCCESS) 3725 (void)vm_map_delete(map, bot, top); 3726 return (rv); 3727 } 3728 3729 /* 3730 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 3731 * successfully grow the stack. 3732 */ 3733 static int 3734 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 3735 { 3736 vm_map_entry_t stack_entry; 3737 struct proc *p; 3738 struct vmspace *vm; 3739 struct ucred *cred; 3740 vm_offset_t gap_end, gap_start, grow_start; 3741 size_t grow_amount, guard, max_grow; 3742 rlim_t lmemlim, stacklim, vmemlim; 3743 int rv, rv1; 3744 bool gap_deleted, grow_down, is_procstack; 3745 #ifdef notyet 3746 uint64_t limit; 3747 #endif 3748 #ifdef RACCT 3749 int error; 3750 #endif 3751 3752 p = curproc; 3753 vm = p->p_vmspace; 3754 3755 /* 3756 * Disallow stack growth when the access is performed by a 3757 * debugger or AIO daemon. The reason is that the wrong 3758 * resource limits are applied. 3759 */ 3760 if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL) 3761 return (KERN_FAILURE); 3762 3763 MPASS(!map->system_map); 3764 3765 guard = stack_guard_page * PAGE_SIZE; 3766 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 3767 stacklim = lim_cur(curthread, RLIMIT_STACK); 3768 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 3769 retry: 3770 /* If addr is not in a hole for a stack grow area, no need to grow. */ 3771 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 3772 return (KERN_FAILURE); 3773 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 3774 return (KERN_SUCCESS); 3775 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 3776 stack_entry = gap_entry->next; 3777 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 3778 stack_entry->start != gap_entry->end) 3779 return (KERN_FAILURE); 3780 grow_amount = round_page(stack_entry->start - addr); 3781 grow_down = true; 3782 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 3783 stack_entry = gap_entry->prev; 3784 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 3785 stack_entry->end != gap_entry->start) 3786 return (KERN_FAILURE); 3787 grow_amount = round_page(addr + 1 - stack_entry->end); 3788 grow_down = false; 3789 } else { 3790 return (KERN_FAILURE); 3791 } 3792 max_grow = gap_entry->end - gap_entry->start; 3793 if (guard > max_grow) 3794 return (KERN_NO_SPACE); 3795 max_grow -= guard; 3796 if (grow_amount > max_grow) 3797 return (KERN_NO_SPACE); 3798 3799 /* 3800 * If this is the main process stack, see if we're over the stack 3801 * limit. 
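 * For example, with a common RLIMIT_STACK soft limit of 8MB, a main
 * stack that has already grown to 8MB fails any further growth here
 * with KERN_NO_SPACE, before the gap entry is ever modified.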
3802 */ 3803 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 3804 addr < (vm_offset_t)p->p_sysent->sv_usrstack; 3805 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 3806 return (KERN_NO_SPACE); 3807 3808 #ifdef RACCT 3809 if (racct_enable) { 3810 PROC_LOCK(p); 3811 if (is_procstack && racct_set(p, RACCT_STACK, 3812 ctob(vm->vm_ssize) + grow_amount)) { 3813 PROC_UNLOCK(p); 3814 return (KERN_NO_SPACE); 3815 } 3816 PROC_UNLOCK(p); 3817 } 3818 #endif 3819 3820 grow_amount = roundup(grow_amount, sgrowsiz); 3821 if (grow_amount > max_grow) 3822 grow_amount = max_grow; 3823 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 3824 grow_amount = trunc_page((vm_size_t)stacklim) - 3825 ctob(vm->vm_ssize); 3826 } 3827 3828 #ifdef notyet 3829 PROC_LOCK(p); 3830 limit = racct_get_available(p, RACCT_STACK); 3831 PROC_UNLOCK(p); 3832 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 3833 grow_amount = limit - ctob(vm->vm_ssize); 3834 #endif 3835 3836 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 3837 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 3838 rv = KERN_NO_SPACE; 3839 goto out; 3840 } 3841 #ifdef RACCT 3842 if (racct_enable) { 3843 PROC_LOCK(p); 3844 if (racct_set(p, RACCT_MEMLOCK, 3845 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 3846 PROC_UNLOCK(p); 3847 rv = KERN_NO_SPACE; 3848 goto out; 3849 } 3850 PROC_UNLOCK(p); 3851 } 3852 #endif 3853 } 3854 3855 /* If we would blow our VMEM resource limit, no go */ 3856 if (map->size + grow_amount > vmemlim) { 3857 rv = KERN_NO_SPACE; 3858 goto out; 3859 } 3860 #ifdef RACCT 3861 if (racct_enable) { 3862 PROC_LOCK(p); 3863 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 3864 PROC_UNLOCK(p); 3865 rv = KERN_NO_SPACE; 3866 goto out; 3867 } 3868 PROC_UNLOCK(p); 3869 } 3870 #endif 3871 3872 if (vm_map_lock_upgrade(map)) { 3873 gap_entry = NULL; 3874 vm_map_lock_read(map); 3875 goto retry; 3876 } 3877 3878 if (grow_down) { 3879 grow_start = gap_entry->end - grow_amount; 3880 if (gap_entry->start + grow_amount == gap_entry->end) { 3881 gap_start = gap_entry->start; 3882 gap_end = gap_entry->end; 3883 vm_map_entry_delete(map, gap_entry); 3884 gap_deleted = true; 3885 } else { 3886 MPASS(gap_entry->start < gap_entry->end - grow_amount); 3887 gap_entry->end -= grow_amount; 3888 vm_map_entry_resize_free(map, gap_entry); 3889 gap_deleted = false; 3890 } 3891 rv = vm_map_insert(map, NULL, 0, grow_start, 3892 grow_start + grow_amount, 3893 stack_entry->protection, stack_entry->max_protection, 3894 MAP_STACK_GROWS_DOWN); 3895 if (rv != KERN_SUCCESS) { 3896 if (gap_deleted) { 3897 rv1 = vm_map_insert(map, NULL, 0, gap_start, 3898 gap_end, VM_PROT_NONE, VM_PROT_NONE, 3899 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN); 3900 MPASS(rv1 == KERN_SUCCESS); 3901 } else { 3902 gap_entry->end += grow_amount; 3903 vm_map_entry_resize_free(map, gap_entry); 3904 } 3905 } 3906 } else { 3907 grow_start = stack_entry->end; 3908 cred = stack_entry->cred; 3909 if (cred == NULL && stack_entry->object.vm_object != NULL) 3910 cred = stack_entry->object.vm_object->cred; 3911 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) 3912 rv = KERN_NO_SPACE; 3913 /* Grow the underlying object if applicable. 
*/ 3914 else if (stack_entry->object.vm_object == NULL || 3915 vm_object_coalesce(stack_entry->object.vm_object, 3916 stack_entry->offset, 3917 (vm_size_t)(stack_entry->end - stack_entry->start), 3918 (vm_size_t)grow_amount, cred != NULL)) { 3919 if (gap_entry->start + grow_amount == gap_entry->end) 3920 vm_map_entry_delete(map, gap_entry); 3921 else 3922 gap_entry->start += grow_amount; 3923 stack_entry->end += grow_amount; 3924 map->size += grow_amount; 3925 vm_map_entry_resize_free(map, stack_entry); 3926 rv = KERN_SUCCESS; 3927 } else 3928 rv = KERN_FAILURE; 3929 } 3930 if (rv == KERN_SUCCESS && is_procstack) 3931 vm->vm_ssize += btoc(grow_amount); 3932 3933 /* 3934 * Heed the MAP_WIREFUTURE flag if it was set for this process. 3935 */ 3936 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { 3937 vm_map_unlock(map); 3938 vm_map_wire(map, grow_start, grow_start + grow_amount, 3939 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 3940 vm_map_lock_read(map); 3941 } else 3942 vm_map_lock_downgrade(map); 3943 3944 out: 3945 #ifdef RACCT 3946 if (racct_enable && rv != KERN_SUCCESS) { 3947 PROC_LOCK(p); 3948 error = racct_set(p, RACCT_VMEM, map->size); 3949 KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); 3950 if (!old_mlock) { 3951 error = racct_set(p, RACCT_MEMLOCK, 3952 ptoa(pmap_wired_count(map->pmap))); 3953 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed")); 3954 } 3955 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); 3956 KASSERT(error == 0, ("decreasing RACCT_STACK failed")); 3957 PROC_UNLOCK(p); 3958 } 3959 #endif 3960 3961 return (rv); 3962 } 3963 3964 /* 3965 * Unshare the specified VM space for exec. If other processes are 3966 * mapped to it, then create a new one. The new vmspace starts empty. 3967 */ 3968 int 3969 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 3970 { 3971 struct vmspace *oldvmspace = p->p_vmspace; 3972 struct vmspace *newvmspace; 3973 3974 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, 3975 ("vmspace_exec recursed")); 3976 newvmspace = vmspace_alloc(minuser, maxuser, NULL); 3977 if (newvmspace == NULL) 3978 return (ENOMEM); 3979 newvmspace->vm_swrss = oldvmspace->vm_swrss; 3980 /* 3981 * This code is written this way for prototype purposes. The 3982 * goal is to avoid running down the vmspace here, but to let the 3983 * other processes that are still using the vmspace run it down 3984 * eventually. Even though there is little or no chance of blocking 3985 * here, it is a good idea to keep this form for future modifications. 3986 */ 3987 PROC_VMSPACE_LOCK(p); 3988 p->p_vmspace = newvmspace; 3989 PROC_VMSPACE_UNLOCK(p); 3990 if (p == curthread->td_proc) 3991 pmap_activate(curthread); 3992 curthread->td_pflags |= TDP_EXECVMSPC; 3993 return (0); 3994 } 3995 3996 /* 3997 * Unshare the specified VM space for forcing COW. This 3998 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
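 * If no other process shares the vmspace (vm_refcnt == 1), this is a
 * no-op.  Otherwise the address space is duplicated with vmspace_fork()
 * and the swap charge for the copied anonymous memory is reserved
 * against the process's credential before the new vmspace is installed.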
3999 */ 4000 int 4001 vmspace_unshare(struct proc *p) 4002 { 4003 struct vmspace *oldvmspace = p->p_vmspace; 4004 struct vmspace *newvmspace; 4005 vm_ooffset_t fork_charge; 4006 4007 if (oldvmspace->vm_refcnt == 1) 4008 return (0); 4009 fork_charge = 0; 4010 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4011 if (newvmspace == NULL) 4012 return (ENOMEM); 4013 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4014 vmspace_free(newvmspace); 4015 return (ENOMEM); 4016 } 4017 PROC_VMSPACE_LOCK(p); 4018 p->p_vmspace = newvmspace; 4019 PROC_VMSPACE_UNLOCK(p); 4020 if (p == curthread->td_proc) 4021 pmap_activate(curthread); 4022 vmspace_free(oldvmspace); 4023 return (0); 4024 } 4025 4026 /* 4027 * vm_map_lookup: 4028 * 4029 * Finds the VM object, offset, and 4030 * protection for a given virtual address in the 4031 * specified map, assuming a page fault of the 4032 * type specified. 4033 * 4034 * Leaves the map in question locked for read; return 4035 * values are guaranteed until a vm_map_lookup_done 4036 * call is performed. Note that the map argument 4037 * is in/out; the returned map must be used in 4038 * the call to vm_map_lookup_done. 4039 * 4040 * A handle (out_entry) is returned for use in 4041 * vm_map_lookup_done, to make that fast. 4042 * 4043 * If a lookup is requested with "write protection" 4044 * specified, the map may be changed to perform virtual 4045 * copying operations, although the data referenced will 4046 * remain the same. 4047 */ 4048 int 4049 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4050 vm_offset_t vaddr, 4051 vm_prot_t fault_typea, 4052 vm_map_entry_t *out_entry, /* OUT */ 4053 vm_object_t *object, /* OUT */ 4054 vm_pindex_t *pindex, /* OUT */ 4055 vm_prot_t *out_prot, /* OUT */ 4056 boolean_t *wired) /* OUT */ 4057 { 4058 vm_map_entry_t entry; 4059 vm_map_t map = *var_map; 4060 vm_prot_t prot; 4061 vm_prot_t fault_type = fault_typea; 4062 vm_object_t eobject; 4063 vm_size_t size; 4064 struct ucred *cred; 4065 4066 RetryLookup: 4067 4068 vm_map_lock_read(map); 4069 4070 RetryLookupLocked: 4071 /* 4072 * Lookup the faulting address. 4073 */ 4074 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 4075 vm_map_unlock_read(map); 4076 return (KERN_INVALID_ADDRESS); 4077 } 4078 4079 entry = *out_entry; 4080 4081 /* 4082 * Handle submaps. 4083 */ 4084 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4085 vm_map_t old_map = map; 4086 4087 *var_map = map = entry->object.sub_map; 4088 vm_map_unlock_read(old_map); 4089 goto RetryLookup; 4090 } 4091 4092 /* 4093 * Check whether this task is allowed to have this page. 
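 * As a special case, when VM_PROT_FAULT_LOOKUP is specified and the
 * faulting address falls within a guard entry that marks a stack gap
 * (MAP_ENTRY_STACK_GAP_DN/UP), vm_map_growstack() is first given a
 * chance to grow the stack, and on success the lookup is retried with
 * the map still read-locked.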
4094 */ 4095 prot = entry->protection; 4096 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 4097 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 4098 if (prot == VM_PROT_NONE && map != kernel_map && 4099 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 4100 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4101 MAP_ENTRY_STACK_GAP_UP)) != 0 && 4102 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 4103 goto RetryLookupLocked; 4104 } 4105 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4106 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 4107 vm_map_unlock_read(map); 4108 return (KERN_PROTECTION_FAILURE); 4109 } 4110 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 4111 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 4112 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 4113 ("entry %p flags %x", entry, entry->eflags)); 4114 if ((fault_typea & VM_PROT_COPY) != 0 && 4115 (entry->max_protection & VM_PROT_WRITE) == 0 && 4116 (entry->eflags & MAP_ENTRY_COW) == 0) { 4117 vm_map_unlock_read(map); 4118 return (KERN_PROTECTION_FAILURE); 4119 } 4120 4121 /* 4122 * If this page is not pageable, we have to get it for all possible 4123 * accesses. 4124 */ 4125 *wired = (entry->wired_count != 0); 4126 if (*wired) 4127 fault_type = entry->protection; 4128 size = entry->end - entry->start; 4129 /* 4130 * If the entry was copy-on-write, we either ... 4131 */ 4132 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4133 /* 4134 * If we want to write the page, we may as well handle that 4135 * now since we've got the map locked. 4136 * 4137 * If we don't need to write the page, we just demote the 4138 * permissions allowed. 4139 */ 4140 if ((fault_type & VM_PROT_WRITE) != 0 || 4141 (fault_typea & VM_PROT_COPY) != 0) { 4142 /* 4143 * Make a new object, and place it in the object 4144 * chain. Note that no new references have appeared 4145 * -- one just moved from the map to the new 4146 * object. 4147 */ 4148 if (vm_map_lock_upgrade(map)) 4149 goto RetryLookup; 4150 4151 if (entry->cred == NULL) { 4152 /* 4153 * The debugger owner is charged for 4154 * the memory. 4155 */ 4156 cred = curthread->td_ucred; 4157 crhold(cred); 4158 if (!swap_reserve_by_cred(size, cred)) { 4159 crfree(cred); 4160 vm_map_unlock(map); 4161 return (KERN_RESOURCE_SHORTAGE); 4162 } 4163 entry->cred = cred; 4164 } 4165 vm_object_shadow(&entry->object.vm_object, 4166 &entry->offset, size); 4167 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4168 eobject = entry->object.vm_object; 4169 if (eobject->cred != NULL) { 4170 /* 4171 * The object was not shadowed. 4172 */ 4173 swap_release_by_cred(size, entry->cred); 4174 crfree(entry->cred); 4175 entry->cred = NULL; 4176 } else if (entry->cred != NULL) { 4177 VM_OBJECT_WLOCK(eobject); 4178 eobject->cred = entry->cred; 4179 eobject->charge = size; 4180 VM_OBJECT_WUNLOCK(eobject); 4181 entry->cred = NULL; 4182 } 4183 4184 vm_map_lock_downgrade(map); 4185 } else { 4186 /* 4187 * We're attempting to read a copy-on-write page -- 4188 * don't allow writes. 4189 */ 4190 prot &= ~VM_PROT_WRITE; 4191 } 4192 } 4193 4194 /* 4195 * Create an object if necessary. 
4196 */ 4197 if (entry->object.vm_object == NULL && 4198 !map->system_map) { 4199 if (vm_map_lock_upgrade(map)) 4200 goto RetryLookup; 4201 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 4202 atop(size)); 4203 entry->offset = 0; 4204 if (entry->cred != NULL) { 4205 VM_OBJECT_WLOCK(entry->object.vm_object); 4206 entry->object.vm_object->cred = entry->cred; 4207 entry->object.vm_object->charge = size; 4208 VM_OBJECT_WUNLOCK(entry->object.vm_object); 4209 entry->cred = NULL; 4210 } 4211 vm_map_lock_downgrade(map); 4212 } 4213 4214 /* 4215 * Return the object/offset from this entry. If the entry was 4216 * copy-on-write or empty, it has been fixed up. 4217 */ 4218 *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset); 4219 *object = entry->object.vm_object; 4220 4221 *out_prot = prot; 4222 return (KERN_SUCCESS); 4223 } 4224 4225 /* 4226 * vm_map_lookup_locked: 4227 * 4228 * Lookup the faulting address. A version of vm_map_lookup that returns 4229 * KERN_FAILURE instead of blocking on map lock or memory allocation. 4230 */ 4231 int 4232 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 4233 vm_offset_t vaddr, 4234 vm_prot_t fault_typea, 4235 vm_map_entry_t *out_entry, /* OUT */ 4236 vm_object_t *object, /* OUT */ 4237 vm_pindex_t *pindex, /* OUT */ 4238 vm_prot_t *out_prot, /* OUT */ 4239 boolean_t *wired) /* OUT */ 4240 { 4241 vm_map_entry_t entry; 4242 vm_map_t map = *var_map; 4243 vm_prot_t prot; 4244 vm_prot_t fault_type = fault_typea; 4245 4246 /* 4247 * Lookup the faulting address. 4248 */ 4249 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 4250 return (KERN_INVALID_ADDRESS); 4251 4252 entry = *out_entry; 4253 4254 /* 4255 * Fail if the entry refers to a submap. 4256 */ 4257 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 4258 return (KERN_FAILURE); 4259 4260 /* 4261 * Check whether this task is allowed to have this page. 4262 */ 4263 prot = entry->protection; 4264 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4265 if ((fault_type & prot) != fault_type) 4266 return (KERN_PROTECTION_FAILURE); 4267 4268 /* 4269 * If this page is not pageable, we have to get it for all possible 4270 * accesses. 4271 */ 4272 *wired = (entry->wired_count != 0); 4273 if (*wired) 4274 fault_type = entry->protection; 4275 4276 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4277 /* 4278 * Fail if the entry was copy-on-write for a write fault. 4279 */ 4280 if (fault_type & VM_PROT_WRITE) 4281 return (KERN_FAILURE); 4282 /* 4283 * We're attempting to read a copy-on-write page -- 4284 * don't allow writes. 4285 */ 4286 prot &= ~VM_PROT_WRITE; 4287 } 4288 4289 /* 4290 * Fail if an object should be created. 4291 */ 4292 if (entry->object.vm_object == NULL && !map->system_map) 4293 return (KERN_FAILURE); 4294 4295 /* 4296 * Return the object/offset from this entry. If the entry was 4297 * copy-on-write or empty, it has been fixed up. 4298 */ 4299 *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset); 4300 *object = entry->object.vm_object; 4301 4302 *out_prot = prot; 4303 return (KERN_SUCCESS); 4304 } 4305 4306 /* 4307 * vm_map_lookup_done: 4308 * 4309 * Releases locks acquired by a vm_map_lookup 4310 * (according to the handle returned by that lookup). 
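 * A typical caller pairs vm_map_lookup() and vm_map_lookup_done()
 * roughly as follows (a hypothetical sketch, not taken from this file):
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... fault in the page from object at pindex ...
 *	vm_map_lookup_done(map, entry);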
4311 */ 4312 void 4313 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4314 { 4315 /* 4316 * Unlock the main-level map 4317 */ 4318 vm_map_unlock_read(map); 4319 } 4320 4321 vm_offset_t 4322 vm_map_max_KBI(const struct vm_map *map) 4323 { 4324 4325 return (vm_map_max(map)); 4326 } 4327 4328 vm_offset_t 4329 vm_map_min_KBI(const struct vm_map *map) 4330 { 4331 4332 return (vm_map_min(map)); 4333 } 4334 4335 pmap_t 4336 vm_map_pmap_KBI(vm_map_t map) 4337 { 4338 4339 return (map->pmap); 4340 } 4341 4342 #include "opt_ddb.h" 4343 #ifdef DDB 4344 #include <sys/kernel.h> 4345 4346 #include <ddb/ddb.h> 4347 4348 static void 4349 vm_map_print(vm_map_t map) 4350 { 4351 vm_map_entry_t entry; 4352 4353 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4354 (void *)map, 4355 (void *)map->pmap, map->nentries, map->timestamp); 4356 4357 db_indent += 2; 4358 for (entry = map->header.next; entry != &map->header; 4359 entry = entry->next) { 4360 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 4361 (void *)entry, (void *)entry->start, (void *)entry->end, 4362 entry->eflags); 4363 { 4364 static char *inheritance_name[4] = 4365 {"share", "copy", "none", "donate_copy"}; 4366 4367 db_iprintf(" prot=%x/%x/%s", 4368 entry->protection, 4369 entry->max_protection, 4370 inheritance_name[(int)(unsigned char)entry->inheritance]); 4371 if (entry->wired_count != 0) 4372 db_printf(", wired"); 4373 } 4374 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4375 db_printf(", share=%p, offset=0x%jx\n", 4376 (void *)entry->object.sub_map, 4377 (uintmax_t)entry->offset); 4378 if ((entry->prev == &map->header) || 4379 (entry->prev->object.sub_map != 4380 entry->object.sub_map)) { 4381 db_indent += 2; 4382 vm_map_print((vm_map_t)entry->object.sub_map); 4383 db_indent -= 2; 4384 } 4385 } else { 4386 if (entry->cred != NULL) 4387 db_printf(", ruid %d", entry->cred->cr_ruid); 4388 db_printf(", object=%p, offset=0x%jx", 4389 (void *)entry->object.vm_object, 4390 (uintmax_t)entry->offset); 4391 if (entry->object.vm_object && entry->object.vm_object->cred) 4392 db_printf(", obj ruid %d charge %jx", 4393 entry->object.vm_object->cred->cr_ruid, 4394 (uintmax_t)entry->object.vm_object->charge); 4395 if (entry->eflags & MAP_ENTRY_COW) 4396 db_printf(", copy (%s)", 4397 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4398 db_printf("\n"); 4399 4400 if ((entry->prev == &map->header) || 4401 (entry->prev->object.vm_object != 4402 entry->object.vm_object)) { 4403 db_indent += 2; 4404 vm_object_print((db_expr_t)(intptr_t) 4405 entry->object.vm_object, 4406 0, 0, (char *)0); 4407 db_indent -= 2; 4408 } 4409 } 4410 } 4411 db_indent -= 2; 4412 } 4413 4414 DB_SHOW_COMMAND(map, map) 4415 { 4416 4417 if (!have_addr) { 4418 db_printf("usage: show map <addr>\n"); 4419 return; 4420 } 4421 vm_map_print((vm_map_t)addr); 4422 } 4423 4424 DB_SHOW_COMMAND(procvm, procvm) 4425 { 4426 struct proc *p; 4427 4428 if (have_addr) { 4429 p = db_lookup_proc(addr); 4430 } else { 4431 p = curproc; 4432 } 4433 4434 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4435 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4436 (void *)vmspace_pmap(p->p_vmspace)); 4437 4438 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 4439 } 4440 4441 #endif /* DDB */ 4442