1 /*- 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 33 * 34 * 35 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 39 * 40 * Permission to use, copy, modify and distribute this software and 41 * its documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49 * 50 * Carnegie Mellon requests users of this software to return to 51 * 52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53 * School of Computer Science 54 * Carnegie Mellon University 55 * Pittsburgh PA 15213-3890 56 * 57 * any improvements or extensions that they make and grant Carnegie the 58 * rights to redistribute these changes. 59 */ 60 61 /* 62 * Virtual memory mapping module. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before
 * any other vm_map routines.
 *
 * Map and entry structures are allocated from the general
 * purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 * These restrictions are necessary since malloc() uses the
 * maps and requires map entries.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int size, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
154 */ 155 #define PROC_VMSPACE_LOCK(p) do { } while (0) 156 #define PROC_VMSPACE_UNLOCK(p) do { } while (0) 157 158 void 159 vm_map_startup(void) 160 { 161 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 162 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, 163 #ifdef INVARIANTS 164 vm_map_zdtor, 165 #else 166 NULL, 167 #endif 168 vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 169 uma_prealloc(mapzone, MAX_KMAP); 170 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 171 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 172 UMA_ZONE_MTXCLASS | UMA_ZONE_VM); 173 uma_prealloc(kmapentzone, MAX_KMAPENT); 174 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 175 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 176 } 177 178 static void 179 vmspace_zfini(void *mem, int size) 180 { 181 struct vmspace *vm; 182 183 vm = (struct vmspace *)mem; 184 pmap_release(vmspace_pmap(vm)); 185 vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map)); 186 } 187 188 static int 189 vmspace_zinit(void *mem, int size, int flags) 190 { 191 struct vmspace *vm; 192 193 vm = (struct vmspace *)mem; 194 195 (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); 196 pmap_pinit(vmspace_pmap(vm)); 197 return (0); 198 } 199 200 static void 201 vm_map_zfini(void *mem, int size) 202 { 203 vm_map_t map; 204 205 map = (vm_map_t)mem; 206 mtx_destroy(&map->system_mtx); 207 sx_destroy(&map->lock); 208 } 209 210 static int 211 vm_map_zinit(void *mem, int size, int flags) 212 { 213 vm_map_t map; 214 215 map = (vm_map_t)mem; 216 map->nentries = 0; 217 map->size = 0; 218 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 219 sx_init(&map->lock, "user map"); 220 return (0); 221 } 222 223 #ifdef INVARIANTS 224 static void 225 vmspace_zdtor(void *mem, int size, void *arg) 226 { 227 struct vmspace *vm; 228 229 vm = (struct vmspace *)mem; 230 231 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); 232 } 233 static void 234 vm_map_zdtor(void *mem, int size, void *arg) 235 { 236 vm_map_t map; 237 238 map = (vm_map_t)mem; 239 KASSERT(map->nentries == 0, 240 ("map %p nentries == %d on free.", 241 map, map->nentries)); 242 KASSERT(map->size == 0, 243 ("map %p size == %lu on free.", 244 map, (unsigned long)map->size)); 245 } 246 #endif /* INVARIANTS */ 247 248 /* 249 * Allocate a vmspace structure, including a vm_map and pmap, 250 * and initialize those structures. The refcnt is set to 1. 
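 *
 * A minimal usage sketch (illustrative only; the address bounds shown
 * are placeholders, not taken from this file).  vmspace_free() drops
 * the reference that vmspace_alloc() returns:
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *	... use vm->vm_map ...
 *	vmspace_free(vm);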
251 */ 252 struct vmspace * 253 vmspace_alloc(min, max) 254 vm_offset_t min, max; 255 { 256 struct vmspace *vm; 257 258 vm = uma_zalloc(vmspace_zone, M_WAITOK); 259 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 260 _vm_map_init(&vm->vm_map, min, max); 261 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 262 vm->vm_refcnt = 1; 263 vm->vm_shm = NULL; 264 vm->vm_swrss = 0; 265 vm->vm_tsize = 0; 266 vm->vm_dsize = 0; 267 vm->vm_ssize = 0; 268 vm->vm_taddr = 0; 269 vm->vm_daddr = 0; 270 vm->vm_maxsaddr = 0; 271 return (vm); 272 } 273 274 void 275 vm_init2(void) 276 { 277 uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count, 278 (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 + 279 maxproc * 2 + maxfiles); 280 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 281 #ifdef INVARIANTS 282 vmspace_zdtor, 283 #else 284 NULL, 285 #endif 286 vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 287 } 288 289 static inline void 290 vmspace_dofree(struct vmspace *vm) 291 { 292 CTR1(KTR_VM, "vmspace_free: %p", vm); 293 294 /* 295 * Make sure any SysV shm is freed, it might not have been in 296 * exit1(). 297 */ 298 shmexit(vm); 299 300 /* 301 * Lock the map, to wait out all other references to it. 302 * Delete all of the mappings and pages they hold, then call 303 * the pmap module to reclaim anything left. 304 */ 305 (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset, 306 vm->vm_map.max_offset); 307 308 uma_zfree(vmspace_zone, vm); 309 } 310 311 void 312 vmspace_free(struct vmspace *vm) 313 { 314 int refcnt; 315 316 if (vm->vm_refcnt == 0) 317 panic("vmspace_free: attempt to free already freed vmspace"); 318 319 do 320 refcnt = vm->vm_refcnt; 321 while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 322 if (refcnt == 1) 323 vmspace_dofree(vm); 324 } 325 326 void 327 vmspace_exitfree(struct proc *p) 328 { 329 struct vmspace *vm; 330 331 PROC_VMSPACE_LOCK(p); 332 vm = p->p_vmspace; 333 p->p_vmspace = NULL; 334 PROC_VMSPACE_UNLOCK(p); 335 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); 336 vmspace_free(vm); 337 } 338 339 void 340 vmspace_exit(struct thread *td) 341 { 342 int refcnt; 343 struct vmspace *vm; 344 struct proc *p; 345 346 /* 347 * Release user portion of address space. 348 * This releases references to vnodes, 349 * which could cause I/O if the file has been unlinked. 350 * Need to do this early enough that we can still sleep. 351 * 352 * The last exiting process to reach this point releases as 353 * much of the environment as it can. vmspace_dofree() is the 354 * slower fallback in case another process had a temporary 355 * reference to the vmspace. 
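	 *
	 * The reference drop below follows the same lock-free pattern as
	 * vmspace_free() above (sketch, with the vmspace0 hand-off of
	 * this function omitted):
	 *
	 *	do
	 *		refcnt = vm->vm_refcnt;
	 *	while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	 *	if (refcnt == 1)
	 *		vmspace_dofree(vm);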
356 */ 357 358 p = td->td_proc; 359 vm = p->p_vmspace; 360 atomic_add_int(&vmspace0.vm_refcnt, 1); 361 do { 362 refcnt = vm->vm_refcnt; 363 if (refcnt > 1 && p->p_vmspace != &vmspace0) { 364 /* Switch now since other proc might free vmspace */ 365 PROC_VMSPACE_LOCK(p); 366 p->p_vmspace = &vmspace0; 367 PROC_VMSPACE_UNLOCK(p); 368 pmap_activate(td); 369 } 370 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 371 if (refcnt == 1) { 372 if (p->p_vmspace != vm) { 373 /* vmspace not yet freed, switch back */ 374 PROC_VMSPACE_LOCK(p); 375 p->p_vmspace = vm; 376 PROC_VMSPACE_UNLOCK(p); 377 pmap_activate(td); 378 } 379 pmap_remove_pages(vmspace_pmap(vm)); 380 /* Switch now since this proc will free vmspace */ 381 PROC_VMSPACE_LOCK(p); 382 p->p_vmspace = &vmspace0; 383 PROC_VMSPACE_UNLOCK(p); 384 pmap_activate(td); 385 vmspace_dofree(vm); 386 } 387 } 388 389 /* Acquire reference to vmspace owned by another process. */ 390 391 struct vmspace * 392 vmspace_acquire_ref(struct proc *p) 393 { 394 struct vmspace *vm; 395 int refcnt; 396 397 PROC_VMSPACE_LOCK(p); 398 vm = p->p_vmspace; 399 if (vm == NULL) { 400 PROC_VMSPACE_UNLOCK(p); 401 return (NULL); 402 } 403 do { 404 refcnt = vm->vm_refcnt; 405 if (refcnt <= 0) { /* Avoid 0->1 transition */ 406 PROC_VMSPACE_UNLOCK(p); 407 return (NULL); 408 } 409 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1)); 410 if (vm != p->p_vmspace) { 411 PROC_VMSPACE_UNLOCK(p); 412 vmspace_free(vm); 413 return (NULL); 414 } 415 PROC_VMSPACE_UNLOCK(p); 416 return (vm); 417 } 418 419 void 420 _vm_map_lock(vm_map_t map, const char *file, int line) 421 { 422 423 if (map->system_map) 424 _mtx_lock_flags(&map->system_mtx, 0, file, line); 425 else 426 _sx_xlock(&map->lock, file, line); 427 map->timestamp++; 428 } 429 430 void 431 _vm_map_unlock(vm_map_t map, const char *file, int line) 432 { 433 434 if (map->system_map) 435 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 436 else 437 _sx_xunlock(&map->lock, file, line); 438 } 439 440 void 441 _vm_map_lock_read(vm_map_t map, const char *file, int line) 442 { 443 444 if (map->system_map) 445 _mtx_lock_flags(&map->system_mtx, 0, file, line); 446 else 447 _sx_xlock(&map->lock, file, line); 448 } 449 450 void 451 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 452 { 453 454 if (map->system_map) 455 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 456 else 457 _sx_xunlock(&map->lock, file, line); 458 } 459 460 int 461 _vm_map_trylock(vm_map_t map, const char *file, int line) 462 { 463 int error; 464 465 error = map->system_map ? 466 !_mtx_trylock(&map->system_mtx, 0, file, line) : 467 !_sx_try_xlock(&map->lock, file, line); 468 if (error == 0) 469 map->timestamp++; 470 return (error == 0); 471 } 472 473 int 474 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 475 { 476 int error; 477 478 error = map->system_map ? 
479 !_mtx_trylock(&map->system_mtx, 0, file, line) : 480 !_sx_try_xlock(&map->lock, file, line); 481 return (error == 0); 482 } 483 484 int 485 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 486 { 487 488 #ifdef INVARIANTS 489 if (map->system_map) { 490 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 491 } else 492 _sx_assert(&map->lock, SX_XLOCKED, file, line); 493 #endif 494 map->timestamp++; 495 return (0); 496 } 497 498 void 499 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 500 { 501 502 #ifdef INVARIANTS 503 if (map->system_map) { 504 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 505 } else 506 _sx_assert(&map->lock, SX_XLOCKED, file, line); 507 #endif 508 } 509 510 /* 511 * vm_map_unlock_and_wait: 512 */ 513 int 514 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait) 515 { 516 517 mtx_lock(&map_sleep_mtx); 518 vm_map_unlock(map); 519 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0)); 520 } 521 522 /* 523 * vm_map_wakeup: 524 */ 525 void 526 vm_map_wakeup(vm_map_t map) 527 { 528 529 /* 530 * Acquire and release map_sleep_mtx to prevent a wakeup() 531 * from being performed (and lost) between the vm_map_unlock() 532 * and the msleep() in vm_map_unlock_and_wait(). 533 */ 534 mtx_lock(&map_sleep_mtx); 535 mtx_unlock(&map_sleep_mtx); 536 wakeup(&map->root); 537 } 538 539 long 540 vmspace_resident_count(struct vmspace *vmspace) 541 { 542 return pmap_resident_count(vmspace_pmap(vmspace)); 543 } 544 545 long 546 vmspace_wired_count(struct vmspace *vmspace) 547 { 548 return pmap_wired_count(vmspace_pmap(vmspace)); 549 } 550 551 /* 552 * vm_map_create: 553 * 554 * Creates and returns a new empty VM map with 555 * the given physical map structure, and having 556 * the given lower and upper address bounds. 557 */ 558 vm_map_t 559 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 560 { 561 vm_map_t result; 562 563 result = uma_zalloc(mapzone, M_WAITOK); 564 CTR1(KTR_VM, "vm_map_create: %p", result); 565 _vm_map_init(result, min, max); 566 result->pmap = pmap; 567 return (result); 568 } 569 570 /* 571 * Initialize an existing vm_map structure 572 * such as that in the vmspace structure. 573 * The pmap is set elsewhere. 574 */ 575 static void 576 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 577 { 578 579 map->header.next = map->header.prev = &map->header; 580 map->needs_wakeup = FALSE; 581 map->system_map = 0; 582 map->min_offset = min; 583 map->max_offset = max; 584 map->flags = 0; 585 map->root = NULL; 586 map->timestamp = 0; 587 } 588 589 void 590 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 591 { 592 _vm_map_init(map, min, max); 593 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 594 sx_init(&map->lock, "user map"); 595 } 596 597 /* 598 * vm_map_entry_dispose: [ internal use only ] 599 * 600 * Inverse of vm_map_entry_create. 601 */ 602 static void 603 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 604 { 605 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 606 } 607 608 /* 609 * vm_map_entry_create: [ internal use only ] 610 * 611 * Allocates a VM map entry for insertion. 612 * No entry fields are filled in. 
613 */ 614 static vm_map_entry_t 615 vm_map_entry_create(vm_map_t map) 616 { 617 vm_map_entry_t new_entry; 618 619 if (map->system_map) 620 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 621 else 622 new_entry = uma_zalloc(mapentzone, M_WAITOK); 623 if (new_entry == NULL) 624 panic("vm_map_entry_create: kernel resources exhausted"); 625 return (new_entry); 626 } 627 628 /* 629 * vm_map_entry_set_behavior: 630 * 631 * Set the expected access behavior, either normal, random, or 632 * sequential. 633 */ 634 static inline void 635 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 636 { 637 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 638 (behavior & MAP_ENTRY_BEHAV_MASK); 639 } 640 641 /* 642 * vm_map_entry_set_max_free: 643 * 644 * Set the max_free field in a vm_map_entry. 645 */ 646 static inline void 647 vm_map_entry_set_max_free(vm_map_entry_t entry) 648 { 649 650 entry->max_free = entry->adj_free; 651 if (entry->left != NULL && entry->left->max_free > entry->max_free) 652 entry->max_free = entry->left->max_free; 653 if (entry->right != NULL && entry->right->max_free > entry->max_free) 654 entry->max_free = entry->right->max_free; 655 } 656 657 /* 658 * vm_map_entry_splay: 659 * 660 * The Sleator and Tarjan top-down splay algorithm with the 661 * following variation. Max_free must be computed bottom-up, so 662 * on the downward pass, maintain the left and right spines in 663 * reverse order. Then, make a second pass up each side to fix 664 * the pointers and compute max_free. The time bound is O(log n) 665 * amortized. 666 * 667 * The new root is the vm_map_entry containing "addr", or else an 668 * adjacent entry (lower or higher) if addr is not in the tree. 669 * 670 * The map must be locked, and leaves it so. 671 * 672 * Returns: the new root. 673 */ 674 static vm_map_entry_t 675 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root) 676 { 677 vm_map_entry_t llist, rlist; 678 vm_map_entry_t ltree, rtree; 679 vm_map_entry_t y; 680 681 /* Special case of empty tree. */ 682 if (root == NULL) 683 return (root); 684 685 /* 686 * Pass One: Splay down the tree until we find addr or a NULL 687 * pointer where addr would go. llist and rlist are the two 688 * sides in reverse order (bottom-up), with llist linked by 689 * the right pointer and rlist linked by the left pointer in 690 * the vm_map_entry. Wait until Pass Two to set max_free on 691 * the two spines. 692 */ 693 llist = NULL; 694 rlist = NULL; 695 for (;;) { 696 /* root is never NULL in here. */ 697 if (addr < root->start) { 698 y = root->left; 699 if (y == NULL) 700 break; 701 if (addr < y->start && y->left != NULL) { 702 /* Rotate right and put y on rlist. */ 703 root->left = y->right; 704 y->right = root; 705 vm_map_entry_set_max_free(root); 706 root = y->left; 707 y->left = rlist; 708 rlist = y; 709 } else { 710 /* Put root on rlist. */ 711 root->left = rlist; 712 rlist = root; 713 root = y; 714 } 715 } else { 716 y = root->right; 717 if (addr < root->end || y == NULL) 718 break; 719 if (addr >= y->end && y->right != NULL) { 720 /* Rotate left and put y on llist. */ 721 root->right = y->left; 722 y->left = root; 723 vm_map_entry_set_max_free(root); 724 root = y->right; 725 y->right = llist; 726 llist = y; 727 } else { 728 /* Put root on llist. */ 729 root->right = llist; 730 llist = root; 731 root = y; 732 } 733 } 734 } 735 736 /* 737 * Pass Two: Walk back up the two spines, flip the pointers 738 * and set max_free. The subtrees of the root go at the 739 * bottom of llist and rlist. 
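	 *
	 * Each former spine node gets its missing child re-attached
	 * before vm_map_entry_set_max_free() runs on it, so max_free is
	 * recomputed strictly bottom-up.  As a small illustration: if a
	 * spine node's own gap (adj_free) is 2 pages but the subtree
	 * re-hung beneath it advertises max_free of 8 pages, the node's
	 * max_free becomes 8 pages after this pass.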
740 */ 741 ltree = root->left; 742 while (llist != NULL) { 743 y = llist->right; 744 llist->right = ltree; 745 vm_map_entry_set_max_free(llist); 746 ltree = llist; 747 llist = y; 748 } 749 rtree = root->right; 750 while (rlist != NULL) { 751 y = rlist->left; 752 rlist->left = rtree; 753 vm_map_entry_set_max_free(rlist); 754 rtree = rlist; 755 rlist = y; 756 } 757 758 /* 759 * Final assembly: add ltree and rtree as subtrees of root. 760 */ 761 root->left = ltree; 762 root->right = rtree; 763 vm_map_entry_set_max_free(root); 764 765 return (root); 766 } 767 768 /* 769 * vm_map_entry_{un,}link: 770 * 771 * Insert/remove entries from maps. 772 */ 773 static void 774 vm_map_entry_link(vm_map_t map, 775 vm_map_entry_t after_where, 776 vm_map_entry_t entry) 777 { 778 779 CTR4(KTR_VM, 780 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 781 map->nentries, entry, after_where); 782 map->nentries++; 783 entry->prev = after_where; 784 entry->next = after_where->next; 785 entry->next->prev = entry; 786 after_where->next = entry; 787 788 if (after_where != &map->header) { 789 if (after_where != map->root) 790 vm_map_entry_splay(after_where->start, map->root); 791 entry->right = after_where->right; 792 entry->left = after_where; 793 after_where->right = NULL; 794 after_where->adj_free = entry->start - after_where->end; 795 vm_map_entry_set_max_free(after_where); 796 } else { 797 entry->right = map->root; 798 entry->left = NULL; 799 } 800 entry->adj_free = (entry->next == &map->header ? map->max_offset : 801 entry->next->start) - entry->end; 802 vm_map_entry_set_max_free(entry); 803 map->root = entry; 804 } 805 806 static void 807 vm_map_entry_unlink(vm_map_t map, 808 vm_map_entry_t entry) 809 { 810 vm_map_entry_t next, prev, root; 811 812 if (entry != map->root) 813 vm_map_entry_splay(entry->start, map->root); 814 if (entry->left == NULL) 815 root = entry->right; 816 else { 817 root = vm_map_entry_splay(entry->start, entry->left); 818 root->right = entry->right; 819 root->adj_free = (entry->next == &map->header ? map->max_offset : 820 entry->next->start) - root->end; 821 vm_map_entry_set_max_free(root); 822 } 823 map->root = root; 824 825 prev = entry->prev; 826 next = entry->next; 827 next->prev = prev; 828 prev->next = next; 829 map->nentries--; 830 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 831 map->nentries, entry); 832 } 833 834 /* 835 * vm_map_entry_resize_free: 836 * 837 * Recompute the amount of free space following a vm_map_entry 838 * and propagate that value up the tree. Call this function after 839 * resizing a map entry in-place, that is, without a call to 840 * vm_map_entry_link() or _unlink(). 841 * 842 * The map must be locked, and leaves it so. 843 */ 844 static void 845 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry) 846 { 847 848 /* 849 * Using splay trees without parent pointers, propagating 850 * max_free up the tree is done by moving the entry to the 851 * root and making the change there. 852 */ 853 if (entry != map->root) 854 map->root = vm_map_entry_splay(entry->start, map->root); 855 856 entry->adj_free = (entry->next == &map->header ? map->max_offset : 857 entry->next->start) - entry->end; 858 vm_map_entry_set_max_free(entry); 859 } 860 861 /* 862 * vm_map_lookup_entry: [ internal use only ] 863 * 864 * Finds the map entry containing (or 865 * immediately preceding) the specified address 866 * in the given map; the entry is returned 867 * in the "entry" parameter. 
The boolean 868 * result indicates whether the address is 869 * actually contained in the map. 870 */ 871 boolean_t 872 vm_map_lookup_entry( 873 vm_map_t map, 874 vm_offset_t address, 875 vm_map_entry_t *entry) /* OUT */ 876 { 877 vm_map_entry_t cur; 878 879 cur = vm_map_entry_splay(address, map->root); 880 if (cur == NULL) 881 *entry = &map->header; 882 else { 883 map->root = cur; 884 885 if (address >= cur->start) { 886 *entry = cur; 887 if (cur->end > address) 888 return (TRUE); 889 } else 890 *entry = cur->prev; 891 } 892 return (FALSE); 893 } 894 895 /* 896 * vm_map_insert: 897 * 898 * Inserts the given whole VM object into the target 899 * map at the specified address range. The object's 900 * size should match that of the address range. 901 * 902 * Requires that the map be locked, and leaves it so. 903 * 904 * If object is non-NULL, ref count must be bumped by caller 905 * prior to making call to account for the new entry. 906 */ 907 int 908 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 909 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 910 int cow) 911 { 912 vm_map_entry_t new_entry; 913 vm_map_entry_t prev_entry; 914 vm_map_entry_t temp_entry; 915 vm_eflags_t protoeflags; 916 917 /* 918 * Check that the start and end points are not bogus. 919 */ 920 if ((start < map->min_offset) || (end > map->max_offset) || 921 (start >= end)) 922 return (KERN_INVALID_ADDRESS); 923 924 /* 925 * Find the entry prior to the proposed starting address; if it's part 926 * of an existing entry, this range is bogus. 927 */ 928 if (vm_map_lookup_entry(map, start, &temp_entry)) 929 return (KERN_NO_SPACE); 930 931 prev_entry = temp_entry; 932 933 /* 934 * Assert that the next entry doesn't overlap the end point. 935 */ 936 if ((prev_entry->next != &map->header) && 937 (prev_entry->next->start < end)) 938 return (KERN_NO_SPACE); 939 940 protoeflags = 0; 941 942 if (cow & MAP_COPY_ON_WRITE) 943 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 944 945 if (cow & MAP_NOFAULT) { 946 protoeflags |= MAP_ENTRY_NOFAULT; 947 948 KASSERT(object == NULL, 949 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 950 } 951 if (cow & MAP_DISABLE_SYNCER) 952 protoeflags |= MAP_ENTRY_NOSYNC; 953 if (cow & MAP_DISABLE_COREDUMP) 954 protoeflags |= MAP_ENTRY_NOCOREDUMP; 955 956 if (object != NULL) { 957 /* 958 * OBJ_ONEMAPPING must be cleared unless this mapping 959 * is trivially proven to be the only mapping for any 960 * of the object's pages. (Object granularity 961 * reference counting is insufficient to recognize 962 * aliases with precision.) 963 */ 964 VM_OBJECT_LOCK(object); 965 if (object->ref_count > 1 || object->shadow_count != 0) 966 vm_object_clear_flag(object, OBJ_ONEMAPPING); 967 VM_OBJECT_UNLOCK(object); 968 } 969 else if ((prev_entry != &map->header) && 970 (prev_entry->eflags == protoeflags) && 971 (prev_entry->end == start) && 972 (prev_entry->wired_count == 0) && 973 ((prev_entry->object.vm_object == NULL) || 974 vm_object_coalesce(prev_entry->object.vm_object, 975 prev_entry->offset, 976 (vm_size_t)(prev_entry->end - prev_entry->start), 977 (vm_size_t)(end - prev_entry->end)))) { 978 /* 979 * We were able to extend the object. Determine if we 980 * can extend the previous map entry to include the 981 * new range as well. 
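		 *
		 * Illustrative example (hypothetical addresses): if
		 * prev_entry maps [A, A + 0x3000) at object offset 0 and
		 * the request is for [A + 0x3000, A + 0x5000) with the
		 * same protection and inheritance, vm_object_coalesce()
		 * has already grown the object by 0x2000 bytes, so the
		 * code below only needs to advance prev_entry->end and
		 * the free-space bookkeeping.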
982 */ 983 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 984 (prev_entry->protection == prot) && 985 (prev_entry->max_protection == max)) { 986 map->size += (end - prev_entry->end); 987 prev_entry->end = end; 988 vm_map_entry_resize_free(map, prev_entry); 989 vm_map_simplify_entry(map, prev_entry); 990 return (KERN_SUCCESS); 991 } 992 993 /* 994 * If we can extend the object but cannot extend the 995 * map entry, we have to create a new map entry. We 996 * must bump the ref count on the extended object to 997 * account for it. object may be NULL. 998 */ 999 object = prev_entry->object.vm_object; 1000 offset = prev_entry->offset + 1001 (prev_entry->end - prev_entry->start); 1002 vm_object_reference(object); 1003 } 1004 1005 /* 1006 * NOTE: if conditionals fail, object can be NULL here. This occurs 1007 * in things like the buffer map where we manage kva but do not manage 1008 * backing objects. 1009 */ 1010 1011 /* 1012 * Create a new entry 1013 */ 1014 new_entry = vm_map_entry_create(map); 1015 new_entry->start = start; 1016 new_entry->end = end; 1017 1018 new_entry->eflags = protoeflags; 1019 new_entry->object.vm_object = object; 1020 new_entry->offset = offset; 1021 new_entry->avail_ssize = 0; 1022 1023 new_entry->inheritance = VM_INHERIT_DEFAULT; 1024 new_entry->protection = prot; 1025 new_entry->max_protection = max; 1026 new_entry->wired_count = 0; 1027 1028 /* 1029 * Insert the new entry into the list 1030 */ 1031 vm_map_entry_link(map, prev_entry, new_entry); 1032 map->size += new_entry->end - new_entry->start; 1033 1034 #if 0 1035 /* 1036 * Temporarily removed to avoid MAP_STACK panic, due to 1037 * MAP_STACK being a huge hack. Will be added back in 1038 * when MAP_STACK (and the user stack mapping) is fixed. 1039 */ 1040 /* 1041 * It may be possible to simplify the entry 1042 */ 1043 vm_map_simplify_entry(map, new_entry); 1044 #endif 1045 1046 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 1047 vm_map_pmap_enter(map, start, prot, 1048 object, OFF_TO_IDX(offset), end - start, 1049 cow & MAP_PREFAULT_PARTIAL); 1050 } 1051 1052 return (KERN_SUCCESS); 1053 } 1054 1055 /* 1056 * vm_map_findspace: 1057 * 1058 * Find the first fit (lowest VM address) for "length" free bytes 1059 * beginning at address >= start in the given map. 1060 * 1061 * In a vm_map_entry, "adj_free" is the amount of free space 1062 * adjacent (higher address) to this entry, and "max_free" is the 1063 * maximum amount of contiguous free space in its subtree. This 1064 * allows finding a free region in one path down the tree, so 1065 * O(log n) amortized with splay trees. 1066 * 1067 * The map must be locked, and leaves it so. 1068 * 1069 * Returns: 0 on success, and starting address in *addr, 1070 * 1 if insufficient space. 1071 */ 1072 int 1073 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1074 vm_offset_t *addr) /* OUT */ 1075 { 1076 vm_map_entry_t entry; 1077 vm_offset_t end, st; 1078 1079 /* 1080 * Request must fit within min/max VM address and must avoid 1081 * address wrap. 1082 */ 1083 if (start < map->min_offset) 1084 start = map->min_offset; 1085 if (start + length > map->max_offset || start + length < start) 1086 return (1); 1087 1088 /* Empty tree means wide open address space. */ 1089 if (map->root == NULL) { 1090 *addr = start; 1091 goto found; 1092 } 1093 1094 /* 1095 * After splay, if start comes before root node, then there 1096 * must be a gap from start to the root. 
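	 *
	 * Worked example (illustrative numbers): if the root maps
	 * [0x2000, 0x3000) with adj_free = 0x1000 and the request is
	 * length 0x800 starting at 0x2800, the first check fails and,
	 * further below, st becomes max(0x2800, 0x3000) = 0x3000; the
	 * test 0x3000 + 0x1000 - 0x3000 >= 0x800 succeeds, so
	 * *addr = 0x3000.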
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		goto found;
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		goto found;
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			goto found;
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");

found:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map) {
		end = round_page(*addr + length);
		if (end > kernel_vm_end)
			pmap_growkernel(end);
	}
	return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address
 * map with the given length.  The search is defined to be
 * first-fit from the specified address; the region found is
 * returned in the same parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result;

	start = *addr;
	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
	    start, start + length, prot, max, cow);
	vm_map_unlock(map);
	return (result);
}

/*
 * vm_map_simplify_entry:
 *
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * The map must be locked.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.
1192 */ 1193 void 1194 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1195 { 1196 vm_map_entry_t next, prev; 1197 vm_size_t prevsize, esize; 1198 1199 if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) 1200 return; 1201 1202 prev = entry->prev; 1203 if (prev != &map->header) { 1204 prevsize = prev->end - prev->start; 1205 if ( (prev->end == entry->start) && 1206 (prev->object.vm_object == entry->object.vm_object) && 1207 (!prev->object.vm_object || 1208 (prev->offset + prevsize == entry->offset)) && 1209 (prev->eflags == entry->eflags) && 1210 (prev->protection == entry->protection) && 1211 (prev->max_protection == entry->max_protection) && 1212 (prev->inheritance == entry->inheritance) && 1213 (prev->wired_count == entry->wired_count)) { 1214 vm_map_entry_unlink(map, prev); 1215 entry->start = prev->start; 1216 entry->offset = prev->offset; 1217 if (entry->prev != &map->header) 1218 vm_map_entry_resize_free(map, entry->prev); 1219 if (prev->object.vm_object) 1220 vm_object_deallocate(prev->object.vm_object); 1221 vm_map_entry_dispose(map, prev); 1222 } 1223 } 1224 1225 next = entry->next; 1226 if (next != &map->header) { 1227 esize = entry->end - entry->start; 1228 if ((entry->end == next->start) && 1229 (next->object.vm_object == entry->object.vm_object) && 1230 (!entry->object.vm_object || 1231 (entry->offset + esize == next->offset)) && 1232 (next->eflags == entry->eflags) && 1233 (next->protection == entry->protection) && 1234 (next->max_protection == entry->max_protection) && 1235 (next->inheritance == entry->inheritance) && 1236 (next->wired_count == entry->wired_count)) { 1237 vm_map_entry_unlink(map, next); 1238 entry->end = next->end; 1239 vm_map_entry_resize_free(map, entry); 1240 if (next->object.vm_object) 1241 vm_object_deallocate(next->object.vm_object); 1242 vm_map_entry_dispose(map, next); 1243 } 1244 } 1245 } 1246 /* 1247 * vm_map_clip_start: [ internal use only ] 1248 * 1249 * Asserts that the given entry begins at or after 1250 * the specified address; if necessary, 1251 * it splits the entry into two. 1252 */ 1253 #define vm_map_clip_start(map, entry, startaddr) \ 1254 { \ 1255 if (startaddr > entry->start) \ 1256 _vm_map_clip_start(map, entry, startaddr); \ 1257 } 1258 1259 /* 1260 * This routine is called only when it is known that 1261 * the entry must be split. 1262 */ 1263 static void 1264 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1265 { 1266 vm_map_entry_t new_entry; 1267 1268 /* 1269 * Split off the front portion -- note that we must insert the new 1270 * entry BEFORE this one, so that this entry has the specified 1271 * starting address. 1272 */ 1273 vm_map_simplify_entry(map, entry); 1274 1275 /* 1276 * If there is no object backing this entry, we might as well create 1277 * one now. If we defer it, an object can get created after the map 1278 * is clipped, and individual objects will be created for the split-up 1279 * map. This is a bit of a hack, but is also about the best place to 1280 * put this improvement. 
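	 *
	 * Illustrative example of the split performed below: clipping an
	 * entry that maps [0x1000, 0x4000) at object offset 0 at the
	 * address 0x2000 leaves new_entry covering [0x1000, 0x2000) at
	 * offset 0 and this entry covering [0x2000, 0x4000) at offset
	 * 0x1000, with both referencing the same backing object.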
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 * vm_map_clip_end: [ internal use only ]
 *
 * Asserts that the given entry ends at or before
 * the specified address; if necessary,
 * it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 * VM_MAP_RANGE_CHECK: [ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 * vm_map_submap: [ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find,
 * and no other operations may have been performed on this
 * range prior to calling vm_map_submap.
 *
 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
 *	vm_fault
 * [Don't try vm_map_copy!]
 *
 * To remove a submapping, one must first remove the
 * range from the superior map, and then destroy the
 * submap (if desired).  [Better yet, don't try it.]
1390 */ 1391 int 1392 vm_map_submap( 1393 vm_map_t map, 1394 vm_offset_t start, 1395 vm_offset_t end, 1396 vm_map_t submap) 1397 { 1398 vm_map_entry_t entry; 1399 int result = KERN_INVALID_ARGUMENT; 1400 1401 vm_map_lock(map); 1402 1403 VM_MAP_RANGE_CHECK(map, start, end); 1404 1405 if (vm_map_lookup_entry(map, start, &entry)) { 1406 vm_map_clip_start(map, entry, start); 1407 } else 1408 entry = entry->next; 1409 1410 vm_map_clip_end(map, entry, end); 1411 1412 if ((entry->start == start) && (entry->end == end) && 1413 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1414 (entry->object.vm_object == NULL)) { 1415 entry->object.sub_map = submap; 1416 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1417 result = KERN_SUCCESS; 1418 } 1419 vm_map_unlock(map); 1420 1421 return (result); 1422 } 1423 1424 /* 1425 * The maximum number of pages to map 1426 */ 1427 #define MAX_INIT_PT 96 1428 1429 /* 1430 * vm_map_pmap_enter: 1431 * 1432 * Preload read-only mappings for the given object into the specified 1433 * map. This eliminates the soft faults on process startup and 1434 * immediately after an mmap(2). 1435 */ 1436 void 1437 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1438 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1439 { 1440 vm_offset_t start; 1441 vm_page_t p, p_start; 1442 vm_pindex_t psize, tmpidx; 1443 boolean_t are_queues_locked; 1444 1445 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 1446 return; 1447 VM_OBJECT_LOCK(object); 1448 if (object->type == OBJT_DEVICE) { 1449 pmap_object_init_pt(map->pmap, addr, object, pindex, size); 1450 goto unlock_return; 1451 } 1452 1453 psize = atop(size); 1454 1455 if (object->type != OBJT_VNODE || 1456 ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 1457 (object->resident_page_count > MAX_INIT_PT))) { 1458 goto unlock_return; 1459 } 1460 1461 if (psize + pindex > object->size) { 1462 if (object->size < pindex) 1463 goto unlock_return; 1464 psize = object->size - pindex; 1465 } 1466 1467 are_queues_locked = FALSE; 1468 start = 0; 1469 p_start = NULL; 1470 1471 if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 1472 if (p->pindex < pindex) { 1473 p = vm_page_splay(pindex, object->root); 1474 if ((object->root = p)->pindex < pindex) 1475 p = TAILQ_NEXT(p, listq); 1476 } 1477 } 1478 /* 1479 * Assert: the variable p is either (1) the page with the 1480 * least pindex greater than or equal to the parameter pindex 1481 * or (2) NULL. 1482 */ 1483 for (; 1484 p != NULL && (tmpidx = p->pindex - pindex) < psize; 1485 p = TAILQ_NEXT(p, listq)) { 1486 /* 1487 * don't allow an madvise to blow away our really 1488 * free pages allocating pv entries. 
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			psize = tmpidx;
			break;
		}
		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
		    (p->busy == 0)) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
			if (!are_queues_locked) {
				are_queues_locked = TRUE;
				vm_page_lock_queues();
			}
			if (VM_PAGE_INQUEUE1(p, PQ_CACHE))
				vm_page_deactivate(p);
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
	if (are_queues_locked)
		vm_page_unlock_queues();
unlock_return:
	VM_OBJECT_UNLOCK(object);
}

/*
 * vm_map_protect:
 *
 * Sets the protection of the specified address
 * region in the target map.  If "set_max" is
 * specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_madvise:
 *
 * This routine traverses a process's map handling the madvise
 * system call.  Advisories are classified as either those affecting
 * the vm_map_entry structure, or those affecting the underlying
 * objects.
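 *
 * For example, MADV_NOSYNC and MADV_NOCORE only toggle eflags bits on
 * the clipped entries (taking the exclusive map lock), while
 * MADV_WILLNEED, MADV_DONTNEED and MADV_FREE are forwarded to
 * vm_object_madvise() under a read lock, as the switch below shows.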
1607 */ 1608 int 1609 vm_map_madvise( 1610 vm_map_t map, 1611 vm_offset_t start, 1612 vm_offset_t end, 1613 int behav) 1614 { 1615 vm_map_entry_t current, entry; 1616 int modify_map = 0; 1617 1618 /* 1619 * Some madvise calls directly modify the vm_map_entry, in which case 1620 * we need to use an exclusive lock on the map and we need to perform 1621 * various clipping operations. Otherwise we only need a read-lock 1622 * on the map. 1623 */ 1624 switch(behav) { 1625 case MADV_NORMAL: 1626 case MADV_SEQUENTIAL: 1627 case MADV_RANDOM: 1628 case MADV_NOSYNC: 1629 case MADV_AUTOSYNC: 1630 case MADV_NOCORE: 1631 case MADV_CORE: 1632 modify_map = 1; 1633 vm_map_lock(map); 1634 break; 1635 case MADV_WILLNEED: 1636 case MADV_DONTNEED: 1637 case MADV_FREE: 1638 vm_map_lock_read(map); 1639 break; 1640 default: 1641 return (KERN_INVALID_ARGUMENT); 1642 } 1643 1644 /* 1645 * Locate starting entry and clip if necessary. 1646 */ 1647 VM_MAP_RANGE_CHECK(map, start, end); 1648 1649 if (vm_map_lookup_entry(map, start, &entry)) { 1650 if (modify_map) 1651 vm_map_clip_start(map, entry, start); 1652 } else { 1653 entry = entry->next; 1654 } 1655 1656 if (modify_map) { 1657 /* 1658 * madvise behaviors that are implemented in the vm_map_entry. 1659 * 1660 * We clip the vm_map_entry so that behavioral changes are 1661 * limited to the specified address range. 1662 */ 1663 for (current = entry; 1664 (current != &map->header) && (current->start < end); 1665 current = current->next 1666 ) { 1667 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1668 continue; 1669 1670 vm_map_clip_end(map, current, end); 1671 1672 switch (behav) { 1673 case MADV_NORMAL: 1674 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1675 break; 1676 case MADV_SEQUENTIAL: 1677 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1678 break; 1679 case MADV_RANDOM: 1680 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1681 break; 1682 case MADV_NOSYNC: 1683 current->eflags |= MAP_ENTRY_NOSYNC; 1684 break; 1685 case MADV_AUTOSYNC: 1686 current->eflags &= ~MAP_ENTRY_NOSYNC; 1687 break; 1688 case MADV_NOCORE: 1689 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1690 break; 1691 case MADV_CORE: 1692 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1693 break; 1694 default: 1695 break; 1696 } 1697 vm_map_simplify_entry(map, current); 1698 } 1699 vm_map_unlock(map); 1700 } else { 1701 vm_pindex_t pindex; 1702 int count; 1703 1704 /* 1705 * madvise behaviors that are implemented in the underlying 1706 * vm_object. 1707 * 1708 * Since we don't clip the vm_map_entry, we have to clip 1709 * the vm_object pindex and count. 
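		 *
		 * Worked example (illustrative, 4 KB pages): an entry
		 * mapping [0x10000, 0x20000) at offset 0, advised over
		 * [0x14000, 0x18000), starts with pindex 0 and count 16;
		 * the clipping below advances pindex by 4 and trims count
		 * by 4 for the head and 8 for the tail, so
		 * vm_object_madvise() sees pindex 4 and count 4.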
1710 */ 1711 for (current = entry; 1712 (current != &map->header) && (current->start < end); 1713 current = current->next 1714 ) { 1715 vm_offset_t useStart; 1716 1717 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1718 continue; 1719 1720 pindex = OFF_TO_IDX(current->offset); 1721 count = atop(current->end - current->start); 1722 useStart = current->start; 1723 1724 if (current->start < start) { 1725 pindex += atop(start - current->start); 1726 count -= atop(start - current->start); 1727 useStart = start; 1728 } 1729 if (current->end > end) 1730 count -= atop(current->end - end); 1731 1732 if (count <= 0) 1733 continue; 1734 1735 vm_object_madvise(current->object.vm_object, 1736 pindex, count, behav); 1737 if (behav == MADV_WILLNEED) { 1738 vm_map_pmap_enter(map, 1739 useStart, 1740 current->protection, 1741 current->object.vm_object, 1742 pindex, 1743 (count << PAGE_SHIFT), 1744 MAP_PREFAULT_MADVISE 1745 ); 1746 } 1747 } 1748 vm_map_unlock_read(map); 1749 } 1750 return (0); 1751 } 1752 1753 1754 /* 1755 * vm_map_inherit: 1756 * 1757 * Sets the inheritance of the specified address 1758 * range in the target map. Inheritance 1759 * affects how the map will be shared with 1760 * child maps at the time of vm_map_fork. 1761 */ 1762 int 1763 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1764 vm_inherit_t new_inheritance) 1765 { 1766 vm_map_entry_t entry; 1767 vm_map_entry_t temp_entry; 1768 1769 switch (new_inheritance) { 1770 case VM_INHERIT_NONE: 1771 case VM_INHERIT_COPY: 1772 case VM_INHERIT_SHARE: 1773 break; 1774 default: 1775 return (KERN_INVALID_ARGUMENT); 1776 } 1777 vm_map_lock(map); 1778 VM_MAP_RANGE_CHECK(map, start, end); 1779 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1780 entry = temp_entry; 1781 vm_map_clip_start(map, entry, start); 1782 } else 1783 entry = temp_entry->next; 1784 while ((entry != &map->header) && (entry->start < end)) { 1785 vm_map_clip_end(map, entry, end); 1786 entry->inheritance = new_inheritance; 1787 vm_map_simplify_entry(map, entry); 1788 entry = entry->next; 1789 } 1790 vm_map_unlock(map); 1791 return (KERN_SUCCESS); 1792 } 1793 1794 /* 1795 * vm_map_unwire: 1796 * 1797 * Implements both kernel and user unwiring. 1798 */ 1799 int 1800 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1801 int flags) 1802 { 1803 vm_map_entry_t entry, first_entry, tmp_entry; 1804 vm_offset_t saved_start; 1805 unsigned int last_timestamp; 1806 int rv; 1807 boolean_t need_wakeup, result, user_unwire; 1808 1809 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 1810 vm_map_lock(map); 1811 VM_MAP_RANGE_CHECK(map, start, end); 1812 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1813 if (flags & VM_MAP_WIRE_HOLESOK) 1814 first_entry = first_entry->next; 1815 else { 1816 vm_map_unlock(map); 1817 return (KERN_INVALID_ADDRESS); 1818 } 1819 } 1820 last_timestamp = map->timestamp; 1821 entry = first_entry; 1822 while (entry != &map->header && entry->start < end) { 1823 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1824 /* 1825 * We have not yet clipped the entry. 1826 */ 1827 saved_start = (start >= entry->start) ? start : 1828 entry->start; 1829 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1830 if (vm_map_unlock_and_wait(map, user_unwire)) { 1831 /* 1832 * Allow interruption of user unwiring? 1833 */ 1834 } 1835 vm_map_lock(map); 1836 if (last_timestamp+1 != map->timestamp) { 1837 /* 1838 * Look again for the entry because the map was 1839 * modified while it was unlocked. 
1840 * Specifically, the entry may have been 1841 * clipped, merged, or deleted. 1842 */ 1843 if (!vm_map_lookup_entry(map, saved_start, 1844 &tmp_entry)) { 1845 if (flags & VM_MAP_WIRE_HOLESOK) 1846 tmp_entry = tmp_entry->next; 1847 else { 1848 if (saved_start == start) { 1849 /* 1850 * First_entry has been deleted. 1851 */ 1852 vm_map_unlock(map); 1853 return (KERN_INVALID_ADDRESS); 1854 } 1855 end = saved_start; 1856 rv = KERN_INVALID_ADDRESS; 1857 goto done; 1858 } 1859 } 1860 if (entry == first_entry) 1861 first_entry = tmp_entry; 1862 else 1863 first_entry = NULL; 1864 entry = tmp_entry; 1865 } 1866 last_timestamp = map->timestamp; 1867 continue; 1868 } 1869 vm_map_clip_start(map, entry, start); 1870 vm_map_clip_end(map, entry, end); 1871 /* 1872 * Mark the entry in case the map lock is released. (See 1873 * above.) 1874 */ 1875 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1876 /* 1877 * Check the map for holes in the specified region. 1878 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 1879 */ 1880 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 1881 (entry->end < end && (entry->next == &map->header || 1882 entry->next->start > entry->end))) { 1883 end = entry->end; 1884 rv = KERN_INVALID_ADDRESS; 1885 goto done; 1886 } 1887 /* 1888 * If system unwiring, require that the entry is system wired. 1889 */ 1890 if (!user_unwire && 1891 vm_map_entry_system_wired_count(entry) == 0) { 1892 end = entry->end; 1893 rv = KERN_INVALID_ARGUMENT; 1894 goto done; 1895 } 1896 entry = entry->next; 1897 } 1898 rv = KERN_SUCCESS; 1899 done: 1900 need_wakeup = FALSE; 1901 if (first_entry == NULL) { 1902 result = vm_map_lookup_entry(map, start, &first_entry); 1903 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 1904 first_entry = first_entry->next; 1905 else 1906 KASSERT(result, ("vm_map_unwire: lookup failed")); 1907 } 1908 entry = first_entry; 1909 while (entry != &map->header && entry->start < end) { 1910 if (rv == KERN_SUCCESS && (!user_unwire || 1911 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 1912 if (user_unwire) 1913 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1914 entry->wired_count--; 1915 if (entry->wired_count == 0) { 1916 /* 1917 * Retain the map lock. 1918 */ 1919 vm_fault_unwire(map, entry->start, entry->end, 1920 entry->object.vm_object != NULL && 1921 entry->object.vm_object->type == OBJT_DEVICE); 1922 } 1923 } 1924 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1925 ("vm_map_unwire: in-transition flag missing")); 1926 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1927 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1928 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1929 need_wakeup = TRUE; 1930 } 1931 vm_map_simplify_entry(map, entry); 1932 entry = entry->next; 1933 } 1934 vm_map_unlock(map); 1935 if (need_wakeup) 1936 vm_map_wakeup(map); 1937 return (rv); 1938 } 1939 1940 /* 1941 * vm_map_wire: 1942 * 1943 * Implements both kernel and user wiring. 1944 */ 1945 int 1946 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1947 int flags) 1948 { 1949 vm_map_entry_t entry, first_entry, tmp_entry; 1950 vm_offset_t saved_end, saved_start; 1951 unsigned int last_timestamp; 1952 int rv; 1953 boolean_t fictitious, need_wakeup, result, user_wire; 1954 1955 user_wire = (flags & VM_MAP_WIRE_USER) ? 
TRUE : FALSE; 1956 vm_map_lock(map); 1957 VM_MAP_RANGE_CHECK(map, start, end); 1958 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1959 if (flags & VM_MAP_WIRE_HOLESOK) 1960 first_entry = first_entry->next; 1961 else { 1962 vm_map_unlock(map); 1963 return (KERN_INVALID_ADDRESS); 1964 } 1965 } 1966 last_timestamp = map->timestamp; 1967 entry = first_entry; 1968 while (entry != &map->header && entry->start < end) { 1969 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1970 /* 1971 * We have not yet clipped the entry. 1972 */ 1973 saved_start = (start >= entry->start) ? start : 1974 entry->start; 1975 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1976 if (vm_map_unlock_and_wait(map, user_wire)) { 1977 /* 1978 * Allow interruption of user wiring? 1979 */ 1980 } 1981 vm_map_lock(map); 1982 if (last_timestamp + 1 != map->timestamp) { 1983 /* 1984 * Look again for the entry because the map was 1985 * modified while it was unlocked. 1986 * Specifically, the entry may have been 1987 * clipped, merged, or deleted. 1988 */ 1989 if (!vm_map_lookup_entry(map, saved_start, 1990 &tmp_entry)) { 1991 if (flags & VM_MAP_WIRE_HOLESOK) 1992 tmp_entry = tmp_entry->next; 1993 else { 1994 if (saved_start == start) { 1995 /* 1996 * first_entry has been deleted. 1997 */ 1998 vm_map_unlock(map); 1999 return (KERN_INVALID_ADDRESS); 2000 } 2001 end = saved_start; 2002 rv = KERN_INVALID_ADDRESS; 2003 goto done; 2004 } 2005 } 2006 if (entry == first_entry) 2007 first_entry = tmp_entry; 2008 else 2009 first_entry = NULL; 2010 entry = tmp_entry; 2011 } 2012 last_timestamp = map->timestamp; 2013 continue; 2014 } 2015 vm_map_clip_start(map, entry, start); 2016 vm_map_clip_end(map, entry, end); 2017 /* 2018 * Mark the entry in case the map lock is released. (See 2019 * above.) 2020 */ 2021 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2022 /* 2023 * 2024 */ 2025 if (entry->wired_count == 0) { 2026 entry->wired_count++; 2027 saved_start = entry->start; 2028 saved_end = entry->end; 2029 fictitious = entry->object.vm_object != NULL && 2030 entry->object.vm_object->type == OBJT_DEVICE; 2031 /* 2032 * Release the map lock, relying on the in-transition 2033 * mark. 2034 */ 2035 vm_map_unlock(map); 2036 rv = vm_fault_wire(map, saved_start, saved_end, 2037 user_wire, fictitious); 2038 vm_map_lock(map); 2039 if (last_timestamp + 1 != map->timestamp) { 2040 /* 2041 * Look again for the entry because the map was 2042 * modified while it was unlocked. The entry 2043 * may have been clipped, but NOT merged or 2044 * deleted. 2045 */ 2046 result = vm_map_lookup_entry(map, saved_start, 2047 &tmp_entry); 2048 KASSERT(result, ("vm_map_wire: lookup failed")); 2049 if (entry == first_entry) 2050 first_entry = tmp_entry; 2051 else 2052 first_entry = NULL; 2053 entry = tmp_entry; 2054 while (entry->end < saved_end) { 2055 if (rv != KERN_SUCCESS) { 2056 KASSERT(entry->wired_count == 1, 2057 ("vm_map_wire: bad count")); 2058 entry->wired_count = -1; 2059 } 2060 entry = entry->next; 2061 } 2062 } 2063 last_timestamp = map->timestamp; 2064 if (rv != KERN_SUCCESS) { 2065 KASSERT(entry->wired_count == 1, 2066 ("vm_map_wire: bad count")); 2067 /* 2068 * Assign an out-of-range value to represent 2069 * the failure to wire this entry. 2070 */ 2071 entry->wired_count = -1; 2072 end = entry->end; 2073 goto done; 2074 } 2075 } else if (!user_wire || 2076 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2077 entry->wired_count++; 2078 } 2079 /* 2080 * Check the map for holes in the specified region. 2081 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
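		 *
		 * For instance (illustrative addresses), wiring
		 * [0x1000, 0x4000) over entries [0x1000, 0x2000) and
		 * [0x3000, 0x4000) trips this check at the gap
		 * [0x2000, 0x3000) and fails with KERN_INVALID_ADDRESS
		 * unless VM_MAP_WIRE_HOLESOK was passed.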
2082 */ 2083 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 2084 (entry->end < end && (entry->next == &map->header || 2085 entry->next->start > entry->end))) { 2086 end = entry->end; 2087 rv = KERN_INVALID_ADDRESS; 2088 goto done; 2089 } 2090 entry = entry->next; 2091 } 2092 rv = KERN_SUCCESS; 2093 done: 2094 need_wakeup = FALSE; 2095 if (first_entry == NULL) { 2096 result = vm_map_lookup_entry(map, start, &first_entry); 2097 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 2098 first_entry = first_entry->next; 2099 else 2100 KASSERT(result, ("vm_map_wire: lookup failed")); 2101 } 2102 entry = first_entry; 2103 while (entry != &map->header && entry->start < end) { 2104 if (rv == KERN_SUCCESS) { 2105 if (user_wire) 2106 entry->eflags |= MAP_ENTRY_USER_WIRED; 2107 } else if (entry->wired_count == -1) { 2108 /* 2109 * Wiring failed on this entry. Thus, unwiring is 2110 * unnecessary. 2111 */ 2112 entry->wired_count = 0; 2113 } else { 2114 if (!user_wire || 2115 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 2116 entry->wired_count--; 2117 if (entry->wired_count == 0) { 2118 /* 2119 * Retain the map lock. 2120 */ 2121 vm_fault_unwire(map, entry->start, entry->end, 2122 entry->object.vm_object != NULL && 2123 entry->object.vm_object->type == OBJT_DEVICE); 2124 } 2125 } 2126 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 2127 ("vm_map_wire: in-transition flag missing")); 2128 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 2129 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 2130 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 2131 need_wakeup = TRUE; 2132 } 2133 vm_map_simplify_entry(map, entry); 2134 entry = entry->next; 2135 } 2136 vm_map_unlock(map); 2137 if (need_wakeup) 2138 vm_map_wakeup(map); 2139 return (rv); 2140 } 2141 2142 /* 2143 * vm_map_sync 2144 * 2145 * Push any dirty cached pages in the address range to their pager. 2146 * If syncio is TRUE, dirty pages are written synchronously. 2147 * If invalidate is TRUE, any cached pages are freed as well. 2148 * 2149 * If the size of the region from start to end is zero, we are 2150 * supposed to flush all modified pages within the region containing 2151 * start. Unfortunately, a region can be split or coalesced with 2152 * neighboring regions, making it difficult to determine what the 2153 * original region was. Therefore, we approximate this requirement by 2154 * flushing the current region containing start. 2155 * 2156 * Returns an error if any part of the specified range is not mapped. 2157 */ 2158 int 2159 vm_map_sync( 2160 vm_map_t map, 2161 vm_offset_t start, 2162 vm_offset_t end, 2163 boolean_t syncio, 2164 boolean_t invalidate) 2165 { 2166 vm_map_entry_t current; 2167 vm_map_entry_t entry; 2168 vm_size_t size; 2169 vm_object_t object; 2170 vm_ooffset_t offset; 2171 2172 vm_map_lock_read(map); 2173 VM_MAP_RANGE_CHECK(map, start, end); 2174 if (!vm_map_lookup_entry(map, start, &entry)) { 2175 vm_map_unlock_read(map); 2176 return (KERN_INVALID_ADDRESS); 2177 } else if (start == end) { 2178 start = entry->start; 2179 end = entry->end; 2180 } 2181 /* 2182 * Make a first pass to check for user-wired memory and holes. 
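 * (When invalidating, user-wired entries are refused with
 * KERN_INVALID_ARGUMENT, and any gap between entries in the range
 * yields KERN_INVALID_ADDRESS.)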
2183 */ 2184 for (current = entry; current->start < end; current = current->next) { 2185 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 2186 vm_map_unlock_read(map); 2187 return (KERN_INVALID_ARGUMENT); 2188 } 2189 if (end > current->end && 2190 (current->next == &map->header || 2191 current->end != current->next->start)) { 2192 vm_map_unlock_read(map); 2193 return (KERN_INVALID_ADDRESS); 2194 } 2195 } 2196 2197 if (invalidate) 2198 pmap_remove(map->pmap, start, end); 2199 2200 /* 2201 * Make a second pass, cleaning/uncaching pages from the indicated 2202 * objects as we go. 2203 */ 2204 for (current = entry; current->start < end; current = current->next) { 2205 offset = current->offset + (start - current->start); 2206 size = (end <= current->end ? end : current->end) - start; 2207 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2208 vm_map_t smap; 2209 vm_map_entry_t tentry; 2210 vm_size_t tsize; 2211 2212 smap = current->object.sub_map; 2213 vm_map_lock_read(smap); 2214 (void) vm_map_lookup_entry(smap, offset, &tentry); 2215 tsize = tentry->end - offset; 2216 if (tsize < size) 2217 size = tsize; 2218 object = tentry->object.vm_object; 2219 offset = tentry->offset + (offset - tentry->start); 2220 vm_map_unlock_read(smap); 2221 } else { 2222 object = current->object.vm_object; 2223 } 2224 vm_object_sync(object, offset, size, syncio, invalidate); 2225 start += size; 2226 } 2227 2228 vm_map_unlock_read(map); 2229 return (KERN_SUCCESS); 2230 } 2231 2232 /* 2233 * vm_map_entry_unwire: [ internal use only ] 2234 * 2235 * Make the region specified by this entry pageable. 2236 * 2237 * The map in question should be locked. 2238 * [This is the reason for this routine's existence.] 2239 */ 2240 static void 2241 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2242 { 2243 vm_fault_unwire(map, entry->start, entry->end, 2244 entry->object.vm_object != NULL && 2245 entry->object.vm_object->type == OBJT_DEVICE); 2246 entry->wired_count = 0; 2247 } 2248 2249 /* 2250 * vm_map_entry_delete: [ internal use only ] 2251 * 2252 * Deallocate the given entry from the target map. 2253 */ 2254 static void 2255 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2256 { 2257 vm_object_t object; 2258 vm_pindex_t offidxstart, offidxend, count; 2259 2260 vm_map_entry_unlink(map, entry); 2261 map->size -= entry->end - entry->start; 2262 2263 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2264 (object = entry->object.vm_object) != NULL) { 2265 count = OFF_TO_IDX(entry->end - entry->start); 2266 offidxstart = OFF_TO_IDX(entry->offset); 2267 offidxend = offidxstart + count; 2268 VM_OBJECT_LOCK(object); 2269 if (object->ref_count != 1 && 2270 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2271 object == kernel_object || object == kmem_object)) { 2272 vm_object_collapse(object); 2273 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2274 if (object->type == OBJT_SWAP) 2275 swap_pager_freespace(object, offidxstart, count); 2276 if (offidxend >= object->size && 2277 offidxstart < object->size) 2278 object->size = offidxstart; 2279 } 2280 VM_OBJECT_UNLOCK(object); 2281 vm_object_deallocate(object); 2282 } 2283 2284 vm_map_entry_dispose(map, entry); 2285 } 2286 2287 /* 2288 * vm_map_delete: [ internal use only ] 2289 * 2290 * Deallocates the given address range from the target 2291 * map. 
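 *
 * The map must be locked by the caller; vm_map_remove() below is the
 * exported wrapper that acquires and releases the map lock around this
 * call.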
2292 */ 2293 int 2294 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2295 { 2296 vm_map_entry_t entry; 2297 vm_map_entry_t first_entry; 2298 2299 /* 2300 * Find the start of the region, and clip it 2301 */ 2302 if (!vm_map_lookup_entry(map, start, &first_entry)) 2303 entry = first_entry->next; 2304 else { 2305 entry = first_entry; 2306 vm_map_clip_start(map, entry, start); 2307 } 2308 2309 /* 2310 * Step through all entries in this region 2311 */ 2312 while ((entry != &map->header) && (entry->start < end)) { 2313 vm_map_entry_t next; 2314 2315 /* 2316 * Wait for wiring or unwiring of an entry to complete. 2317 * Also wait for any system wirings to disappear on 2318 * user maps. 2319 */ 2320 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 2321 (vm_map_pmap(map) != kernel_pmap && 2322 vm_map_entry_system_wired_count(entry) != 0)) { 2323 unsigned int last_timestamp; 2324 vm_offset_t saved_start; 2325 vm_map_entry_t tmp_entry; 2326 2327 saved_start = entry->start; 2328 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2329 last_timestamp = map->timestamp; 2330 (void) vm_map_unlock_and_wait(map, FALSE); 2331 vm_map_lock(map); 2332 if (last_timestamp + 1 != map->timestamp) { 2333 /* 2334 * Look again for the entry because the map was 2335 * modified while it was unlocked. 2336 * Specifically, the entry may have been 2337 * clipped, merged, or deleted. 2338 */ 2339 if (!vm_map_lookup_entry(map, saved_start, 2340 &tmp_entry)) 2341 entry = tmp_entry->next; 2342 else { 2343 entry = tmp_entry; 2344 vm_map_clip_start(map, entry, 2345 saved_start); 2346 } 2347 } 2348 continue; 2349 } 2350 vm_map_clip_end(map, entry, end); 2351 2352 next = entry->next; 2353 2354 /* 2355 * Unwire before removing addresses from the pmap; otherwise, 2356 * unwiring will put the entries back in the pmap. 2357 */ 2358 if (entry->wired_count != 0) { 2359 vm_map_entry_unwire(map, entry); 2360 } 2361 2362 pmap_remove(map->pmap, entry->start, entry->end); 2363 2364 /* 2365 * Delete the entry (which may delete the object) only after 2366 * removing all pmap entries pointing to its pages. 2367 * (Otherwise, its page frames may be reallocated, and any 2368 * modify bits will be set in the wrong object!) 2369 */ 2370 vm_map_entry_delete(map, entry); 2371 entry = next; 2372 } 2373 return (KERN_SUCCESS); 2374 } 2375 2376 /* 2377 * vm_map_remove: 2378 * 2379 * Remove the given address range from the target map. 2380 * This is the exported form of vm_map_delete. 2381 */ 2382 int 2383 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2384 { 2385 int result; 2386 2387 vm_map_lock(map); 2388 VM_MAP_RANGE_CHECK(map, start, end); 2389 result = vm_map_delete(map, start, end); 2390 vm_map_unlock(map); 2391 return (result); 2392 } 2393 2394 /* 2395 * vm_map_check_protection: 2396 * 2397 * Assert that the target map allows the specified privilege on the 2398 * entire address region given. The entire region must be allocated. 2399 * 2400 * WARNING! This code does not and should not check whether the 2401 * contents of the region is accessible. For example a smaller file 2402 * might be mapped into a larger address space. 2403 * 2404 * NOTE! This code is also called by munmap(). 2405 * 2406 * The map must be locked. A read lock is sufficient. 
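 *
 * A minimal usage sketch (hypothetical caller; "addr", "len", and "ok"
 * are assumed, page-rounded/local values supplied by that caller):
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, addr, addr + len,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	vm_map_unlock_read(map);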
2407 */ 2408 boolean_t 2409 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2410 vm_prot_t protection) 2411 { 2412 vm_map_entry_t entry; 2413 vm_map_entry_t tmp_entry; 2414 2415 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2416 return (FALSE); 2417 entry = tmp_entry; 2418 2419 while (start < end) { 2420 if (entry == &map->header) 2421 return (FALSE); 2422 /* 2423 * No holes allowed! 2424 */ 2425 if (start < entry->start) 2426 return (FALSE); 2427 /* 2428 * Check protection associated with entry. 2429 */ 2430 if ((entry->protection & protection) != protection) 2431 return (FALSE); 2432 /* go to next entry */ 2433 start = entry->end; 2434 entry = entry->next; 2435 } 2436 return (TRUE); 2437 } 2438 2439 /* 2440 * vm_map_copy_entry: 2441 * 2442 * Copies the contents of the source entry to the destination 2443 * entry. The entries *must* be aligned properly. 2444 */ 2445 static void 2446 vm_map_copy_entry( 2447 vm_map_t src_map, 2448 vm_map_t dst_map, 2449 vm_map_entry_t src_entry, 2450 vm_map_entry_t dst_entry) 2451 { 2452 vm_object_t src_object; 2453 2454 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2455 return; 2456 2457 if (src_entry->wired_count == 0) { 2458 2459 /* 2460 * If the source entry is marked needs_copy, it is already 2461 * write-protected. 2462 */ 2463 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2464 pmap_protect(src_map->pmap, 2465 src_entry->start, 2466 src_entry->end, 2467 src_entry->protection & ~VM_PROT_WRITE); 2468 } 2469 2470 /* 2471 * Make a copy of the object. 2472 */ 2473 if ((src_object = src_entry->object.vm_object) != NULL) { 2474 VM_OBJECT_LOCK(src_object); 2475 if ((src_object->handle == NULL) && 2476 (src_object->type == OBJT_DEFAULT || 2477 src_object->type == OBJT_SWAP)) { 2478 vm_object_collapse(src_object); 2479 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2480 vm_object_split(src_entry); 2481 src_object = src_entry->object.vm_object; 2482 } 2483 } 2484 vm_object_reference_locked(src_object); 2485 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2486 VM_OBJECT_UNLOCK(src_object); 2487 dst_entry->object.vm_object = src_object; 2488 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2489 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2490 dst_entry->offset = src_entry->offset; 2491 } else { 2492 dst_entry->object.vm_object = NULL; 2493 dst_entry->offset = 0; 2494 } 2495 2496 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2497 dst_entry->end - dst_entry->start, src_entry->start); 2498 } else { 2499 /* 2500 * Of course, wired down pages can't be set copy-on-write. 2501 * Cause wired pages to be copied into the new map by 2502 * simulating faults (the new pages are pageable) 2503 */ 2504 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2505 } 2506 } 2507 2508 /* 2509 * vmspace_map_entry_forked: 2510 * Update the newly-forked vmspace each time a map entry is inherited 2511 * or copied. The values for vm_dsize and vm_tsize are approximate 2512 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 
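 * Called from vmspace_fork() below for each entry that is inherited by
 * sharing or copying.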
2513 */ 2514 static void 2515 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 2516 vm_map_entry_t entry) 2517 { 2518 vm_size_t entrysize; 2519 vm_offset_t newend; 2520 2521 entrysize = entry->end - entry->start; 2522 vm2->vm_map.size += entrysize; 2523 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 2524 vm2->vm_ssize += btoc(entrysize); 2525 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 2526 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 2527 newend = MIN(entry->end, 2528 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 2529 vm2->vm_dsize += btoc(newend - entry->start); 2530 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 2531 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 2532 newend = MIN(entry->end, 2533 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 2534 vm2->vm_tsize += btoc(newend - entry->start); 2535 } 2536 } 2537 2538 /* 2539 * vmspace_fork: 2540 * Create a new process vmspace structure and vm_map 2541 * based on those of an existing process. The new map 2542 * is based on the old map, according to the inheritance 2543 * values on the regions in that map. 2544 * 2545 * XXX It might be worth coalescing the entries added to the new vmspace. 2546 * 2547 * The source map must not be locked. 2548 */ 2549 struct vmspace * 2550 vmspace_fork(struct vmspace *vm1) 2551 { 2552 struct vmspace *vm2; 2553 vm_map_t old_map = &vm1->vm_map; 2554 vm_map_t new_map; 2555 vm_map_entry_t old_entry; 2556 vm_map_entry_t new_entry; 2557 vm_object_t object; 2558 2559 vm_map_lock(old_map); 2560 2561 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2562 vm2->vm_taddr = vm1->vm_taddr; 2563 vm2->vm_daddr = vm1->vm_daddr; 2564 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 2565 new_map = &vm2->vm_map; /* XXX */ 2566 new_map->timestamp = 1; 2567 2568 /* Do not inherit the MAP_WIREFUTURE property. */ 2569 if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) 2570 new_map->flags &= ~MAP_WIREFUTURE; 2571 2572 old_entry = old_map->header.next; 2573 2574 while (old_entry != &old_map->header) { 2575 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2576 panic("vm_map_fork: encountered a submap"); 2577 2578 switch (old_entry->inheritance) { 2579 case VM_INHERIT_NONE: 2580 break; 2581 2582 case VM_INHERIT_SHARE: 2583 /* 2584 * Clone the entry, creating the shared object if necessary. 2585 */ 2586 object = old_entry->object.vm_object; 2587 if (object == NULL) { 2588 object = vm_object_allocate(OBJT_DEFAULT, 2589 atop(old_entry->end - old_entry->start)); 2590 old_entry->object.vm_object = object; 2591 old_entry->offset = 0; 2592 } 2593 2594 /* 2595 * Add the reference before calling vm_object_shadow 2596 * to insure that a shadow object is created. 2597 */ 2598 vm_object_reference(object); 2599 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2600 vm_object_shadow(&old_entry->object.vm_object, 2601 &old_entry->offset, 2602 atop(old_entry->end - old_entry->start)); 2603 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2604 /* Transfer the second reference too. */ 2605 vm_object_reference( 2606 old_entry->object.vm_object); 2607 vm_object_deallocate(object); 2608 object = old_entry->object.vm_object; 2609 } 2610 VM_OBJECT_LOCK(object); 2611 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2612 VM_OBJECT_UNLOCK(object); 2613 2614 /* 2615 * Clone the entry, referencing the shared object. 
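 * (The child gets its own map entry, but it references the same VM
 * object as the parent; OBJ_ONEMAPPING was cleared on that object
 * above.)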
2616 */
2617 new_entry = vm_map_entry_create(new_map);
2618 *new_entry = *old_entry;
2619 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2620 new_entry->wired_count = 0;
2621 
2622 /*
2623 * Insert the entry into the new map -- we know we're
2624 * inserting at the end of the new map.
2625 */
2626 vm_map_entry_link(new_map, new_map->header.prev,
2627 new_entry);
2628 vmspace_map_entry_forked(vm1, vm2, new_entry);
2629 
2630 /*
2631 * Update the physical map
2632 */
2633 pmap_copy(new_map->pmap, old_map->pmap,
2634 new_entry->start,
2635 (old_entry->end - old_entry->start),
2636 old_entry->start);
2637 break;
2638 
2639 case VM_INHERIT_COPY:
2640 /*
2641 * Clone the entry and link into the map.
2642 */
2643 new_entry = vm_map_entry_create(new_map);
2644 *new_entry = *old_entry;
2645 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2646 new_entry->wired_count = 0;
2647 new_entry->object.vm_object = NULL;
2648 vm_map_entry_link(new_map, new_map->header.prev,
2649 new_entry);
2650 vmspace_map_entry_forked(vm1, vm2, new_entry);
2651 vm_map_copy_entry(old_map, new_map, old_entry,
2652 new_entry);
2653 break;
2654 }
2655 old_entry = old_entry->next;
2656 }
2657 
2658 vm_map_unlock(old_map);
2659 
2660 return (vm2);
2661 }
2662 
2663 int
2664 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2665 vm_prot_t prot, vm_prot_t max, int cow)
2666 {
2667 vm_map_entry_t new_entry, prev_entry;
2668 vm_offset_t bot, top;
2669 vm_size_t init_ssize;
2670 int orient, rv;
2671 rlim_t vmemlim;
2672 
2673 /*
2674 * The stack orientation is piggybacked with the cow argument.
2675 * Extract it into orient and mask the cow argument so that we
2676 * don't pass it around further.
2677 * NOTE: We explicitly allow bi-directional stacks.
2678 */
2679 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
2680 cow &= ~orient;
2681 KASSERT(orient != 0, ("No stack grow direction"));
2682 
2683 if (addrbos < vm_map_min(map) || addrbos > map->max_offset)
2684 return (KERN_NO_SPACE);
2685 
2686 init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
2687 
2688 PROC_LOCK(curthread->td_proc);
2689 vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
2690 PROC_UNLOCK(curthread->td_proc);
2691 
2692 vm_map_lock(map);
2693 
2694 /* If addr is already mapped, no go */
2695 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2696 vm_map_unlock(map);
2697 return (KERN_NO_SPACE);
2698 }
2699 
2700 /* If we would blow our VMEM resource limit, no go */
2701 if (map->size + init_ssize > vmemlim) {
2702 vm_map_unlock(map);
2703 return (KERN_NO_SPACE);
2704 }
2705 
2706 /*
2707 * If we can't accommodate max_ssize in the current mapping, no go.
2708 * However, we need to be aware that subsequent user mappings might
2709 * map into the space we have reserved for stack, and currently this
2710 * space is not protected.
2711 *
2712 * Hopefully we will at least detect this condition when we try to
2713 * grow the stack.
2714 */
2715 if ((prev_entry->next != &map->header) &&
2716 (prev_entry->next->start < addrbos + max_ssize)) {
2717 vm_map_unlock(map);
2718 return (KERN_NO_SPACE);
2719 }
2720 
2721 /*
2722 * We initially map a stack of only init_ssize. We will grow as
2723 * needed later. Depending on the orientation of the stack (i.e.
2724 * the grow direction) we either map at the top of the range, the
2725 * bottom of the range or in the middle.
2726 *
2727 * Note: we would normally expect prot and max to be VM_PROT_ALL,
2728 * and cow to be 0.
Possibly we should eliminate these as input 2729 * parameters, and just pass these values here in the insert call. 2730 */ 2731 if (orient == MAP_STACK_GROWS_DOWN) 2732 bot = addrbos + max_ssize - init_ssize; 2733 else if (orient == MAP_STACK_GROWS_UP) 2734 bot = addrbos; 2735 else 2736 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 2737 top = bot + init_ssize; 2738 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 2739 2740 /* Now set the avail_ssize amount. */ 2741 if (rv == KERN_SUCCESS) { 2742 if (prev_entry != &map->header) 2743 vm_map_clip_end(map, prev_entry, bot); 2744 new_entry = prev_entry->next; 2745 if (new_entry->end != top || new_entry->start != bot) 2746 panic("Bad entry start/end for new stack entry"); 2747 2748 new_entry->avail_ssize = max_ssize - init_ssize; 2749 if (orient & MAP_STACK_GROWS_DOWN) 2750 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2751 if (orient & MAP_STACK_GROWS_UP) 2752 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 2753 } 2754 2755 vm_map_unlock(map); 2756 return (rv); 2757 } 2758 2759 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2760 * desired address is already mapped, or if we successfully grow 2761 * the stack. Also returns KERN_SUCCESS if addr is outside the 2762 * stack range (this is strange, but preserves compatibility with 2763 * the grow function in vm_machdep.c). 2764 */ 2765 int 2766 vm_map_growstack(struct proc *p, vm_offset_t addr) 2767 { 2768 vm_map_entry_t next_entry, prev_entry; 2769 vm_map_entry_t new_entry, stack_entry; 2770 struct vmspace *vm = p->p_vmspace; 2771 vm_map_t map = &vm->vm_map; 2772 vm_offset_t end; 2773 size_t grow_amount, max_grow; 2774 rlim_t stacklim, vmemlim; 2775 int is_procstack, rv; 2776 2777 Retry: 2778 PROC_LOCK(p); 2779 stacklim = lim_cur(p, RLIMIT_STACK); 2780 vmemlim = lim_cur(p, RLIMIT_VMEM); 2781 PROC_UNLOCK(p); 2782 2783 vm_map_lock_read(map); 2784 2785 /* If addr is already in the entry range, no need to grow.*/ 2786 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2787 vm_map_unlock_read(map); 2788 return (KERN_SUCCESS); 2789 } 2790 2791 next_entry = prev_entry->next; 2792 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 2793 /* 2794 * This entry does not grow upwards. Since the address lies 2795 * beyond this entry, the next entry (if one exists) has to 2796 * be a downward growable entry. The entry list header is 2797 * never a growable entry, so it suffices to check the flags. 2798 */ 2799 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 2800 vm_map_unlock_read(map); 2801 return (KERN_SUCCESS); 2802 } 2803 stack_entry = next_entry; 2804 } else { 2805 /* 2806 * This entry grows upward. If the next entry does not at 2807 * least grow downwards, this is the entry we need to grow. 2808 * otherwise we have two possible choices and we have to 2809 * select one. 2810 */ 2811 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 2812 /* 2813 * We have two choices; grow the entry closest to 2814 * the address to minimize the amount of growth. 2815 */ 2816 if (addr - prev_entry->end <= next_entry->start - addr) 2817 stack_entry = prev_entry; 2818 else 2819 stack_entry = next_entry; 2820 } else 2821 stack_entry = prev_entry; 2822 } 2823 2824 if (stack_entry == next_entry) { 2825 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 2826 KASSERT(addr < stack_entry->start, ("foo")); 2827 end = (prev_entry != &map->header) ? 
prev_entry->end :
2828 stack_entry->start - stack_entry->avail_ssize;
2829 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
2830 max_grow = stack_entry->start - end;
2831 } else {
2832 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
2833 KASSERT(addr >= stack_entry->end, ("foo"));
2834 end = (next_entry != &map->header) ? next_entry->start :
2835 stack_entry->end + stack_entry->avail_ssize;
2836 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
2837 max_grow = end - stack_entry->end;
2838 }
2839 
2840 if (grow_amount > stack_entry->avail_ssize) {
2841 vm_map_unlock_read(map);
2842 return (KERN_NO_SPACE);
2843 }
2844 
2845 /*
2846 * If there is no longer enough space between the entries, fail and
2847 * adjust the available space. Note: this should only happen if the
2848 * user has mapped into the stack area after the stack was created,
2849 * and is probably an error.
2850 *
2851 * This also effectively destroys any guard page the user might have
2852 * intended by limiting the stack size.
2853 */
2854 if (grow_amount > max_grow) {
2855 if (vm_map_lock_upgrade(map))
2856 goto Retry;
2857 
2858 stack_entry->avail_ssize = max_grow;
2859 
2860 vm_map_unlock(map);
2861 return (KERN_NO_SPACE);
2862 }
2863 
2864 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
2865 
2866 /*
2867 * If this is the main process stack, see if we're over the stack
2868 * limit.
2869 */
2870 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
2871 vm_map_unlock_read(map);
2872 return (KERN_NO_SPACE);
2873 }
2874 
2875 /* Round up the grow amount to a multiple of sgrowsiz */
2876 grow_amount = roundup (grow_amount, sgrowsiz);
2877 if (grow_amount > stack_entry->avail_ssize)
2878 grow_amount = stack_entry->avail_ssize;
2879 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
2880 grow_amount = stacklim - ctob(vm->vm_ssize);
2881 }
2882 
2883 /* If we would blow our VMEM resource limit, no go */
2884 if (map->size + grow_amount > vmemlim) {
2885 vm_map_unlock_read(map);
2886 return (KERN_NO_SPACE);
2887 }
2888 
2889 if (vm_map_lock_upgrade(map))
2890 goto Retry;
2891 
2892 if (stack_entry == next_entry) {
2893 /*
2894 * Growing downward.
2895 */
2896 /* Get the preliminary new entry start value */
2897 addr = stack_entry->start - grow_amount;
2898 
2899 /*
2900 * If this puts us into the previous entry, cut back our
2901 * growth to the available space. Also, see the note above.
2902 */
2903 if (addr < end) {
2904 stack_entry->avail_ssize = max_grow;
2905 addr = end;
2906 }
2907 
2908 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2909 p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
2910 
2911 /* Adjust the available stack space by the amount we grew. */
2912 if (rv == KERN_SUCCESS) {
2913 if (prev_entry != &map->header)
2914 vm_map_clip_end(map, prev_entry, addr);
2915 new_entry = prev_entry->next;
2916 KASSERT(new_entry == stack_entry->prev, ("foo"));
2917 KASSERT(new_entry->end == stack_entry->start, ("foo"));
2918 KASSERT(new_entry->start == addr, ("foo"));
2919 grow_amount = new_entry->end - new_entry->start;
2920 new_entry->avail_ssize = stack_entry->avail_ssize -
2921 grow_amount;
2922 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
2923 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
2924 }
2925 } else {
2926 /*
2927 * Growing upward.
2928 */
2929 addr = stack_entry->end + grow_amount;
2930 
2931 /*
2932 * If this puts us into the next entry, cut back our growth
2933 * to the available space. Also, see the note above.
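 * (Here "end" is the start of the next entry, or the end of the stack
 * reservation when there is no next entry.)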
2934 */
2935 if (addr > end) {
2936 stack_entry->avail_ssize = end - stack_entry->end;
2937 addr = end;
2938 }
2939 
2940 grow_amount = addr - stack_entry->end;
2941 
2942 /* Grow the underlying object if applicable. */
2943 if (stack_entry->object.vm_object == NULL ||
2944 vm_object_coalesce(stack_entry->object.vm_object,
2945 stack_entry->offset,
2946 (vm_size_t)(stack_entry->end - stack_entry->start),
2947 (vm_size_t)grow_amount)) {
2948 map->size += (addr - stack_entry->end);
2949 /* Update the current entry. */
2950 stack_entry->end = addr;
2951 stack_entry->avail_ssize -= grow_amount;
2952 vm_map_entry_resize_free(map, stack_entry);
2953 rv = KERN_SUCCESS;
2954 
2955 if (next_entry != &map->header)
2956 vm_map_clip_start(map, next_entry, addr);
2957 } else
2958 rv = KERN_FAILURE;
2959 }
2960 
2961 if (rv == KERN_SUCCESS && is_procstack)
2962 vm->vm_ssize += btoc(grow_amount);
2963 
2964 vm_map_unlock(map);
2965 
2966 /*
2967 * Heed the MAP_WIREFUTURE flag if it was set for this process.
2968 */
2969 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
2970 vm_map_wire(map,
2971 (stack_entry == next_entry) ? addr : addr - grow_amount,
2972 (stack_entry == next_entry) ? stack_entry->start : addr,
2973 (p->p_flag & P_SYSTEM)
2974 ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
2975 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
2976 }
2977 
2978 return (rv);
2979 }
2980 
2981 /*
2982 * Unshare the specified VM space for exec. If other processes share
2983 * it, then create a new one. The new vmspace is empty.
2984 */
2985 void
2986 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
2987 {
2988 struct vmspace *oldvmspace = p->p_vmspace;
2989 struct vmspace *newvmspace;
2990 
2991 newvmspace = vmspace_alloc(minuser, maxuser);
2992 newvmspace->vm_swrss = oldvmspace->vm_swrss;
2993 /*
2994 * This code is written like this for prototype purposes. The
2995 * goal is to avoid running down the vmspace here, but to let the
2996 * other processes that are still using the vmspace finally
2997 * run it down. Even though there is little or no chance of blocking
2998 * here, it is a good idea to keep this form for future mods.
2999 */
3000 PROC_VMSPACE_LOCK(p);
3001 p->p_vmspace = newvmspace;
3002 PROC_VMSPACE_UNLOCK(p);
3003 if (p == curthread->td_proc) /* XXXKSE ? */
3004 pmap_activate(curthread);
3005 vmspace_free(oldvmspace);
3006 }
3007 
3008 /*
3009 * Unshare the specified VM space for forcing COW. This
3010 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3011 */
3012 void
3013 vmspace_unshare(struct proc *p)
3014 {
3015 struct vmspace *oldvmspace = p->p_vmspace;
3016 struct vmspace *newvmspace;
3017 
3018 if (oldvmspace->vm_refcnt == 1)
3019 return;
3020 newvmspace = vmspace_fork(oldvmspace);
3021 PROC_VMSPACE_LOCK(p);
3022 p->p_vmspace = newvmspace;
3023 PROC_VMSPACE_UNLOCK(p);
3024 if (p == curthread->td_proc) /* XXXKSE ? */
3025 pmap_activate(curthread);
3026 vmspace_free(oldvmspace);
3027 }
3028 
3029 /*
3030 * vm_map_lookup:
3031 *
3032 * Finds the VM object, offset, and
3033 * protection for a given virtual address in the
3034 * specified map, assuming a page fault of the
3035 * type specified.
3036 *
3037 * Leaves the map in question locked for read; return
3038 * values are guaranteed until a vm_map_lookup_done
3039 * call is performed. Note that the map argument
3040 * is in/out; the returned map must be used in
3041 * the call to vm_map_lookup_done.
3042 *
3043 * A handle (out_entry) is returned for use in
3044 * vm_map_lookup_done, to make that fast.
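 *
 * A hedged sketch of the usual calling pattern (modeled on the page
 * fault handler; the local variable names here are hypothetical):
 *
 *	result = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... use object and pindex under the read-locked map ...
 *	vm_map_lookup_done(map, entry);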
3045 *
3046 * If a lookup is requested with "write protection"
3047 * specified, the map may be changed to perform virtual
3048 * copying operations, although the data referenced will
3049 * remain the same.
3050 */
3051 int
3052 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
3053 vm_offset_t vaddr,
3054 vm_prot_t fault_typea,
3055 vm_map_entry_t *out_entry, /* OUT */
3056 vm_object_t *object, /* OUT */
3057 vm_pindex_t *pindex, /* OUT */
3058 vm_prot_t *out_prot, /* OUT */
3059 boolean_t *wired) /* OUT */
3060 {
3061 vm_map_entry_t entry;
3062 vm_map_t map = *var_map;
3063 vm_prot_t prot;
3064 vm_prot_t fault_type = fault_typea;
3065 
3066 RetryLookup:;
3067 /*
3068 * Lookup the faulting address.
3069 */
3070 
3071 vm_map_lock_read(map);
3072 #define RETURN(why) \
3073 { \
3074 vm_map_unlock_read(map); \
3075 return (why); \
3076 }
3077 
3078 /*
3079 * If the map has an interesting hint, try it before calling full
3080 * blown lookup routine.
3081 */
3082 entry = map->root;
3083 *out_entry = entry;
3084 if (entry == NULL ||
3085 (vaddr < entry->start) || (vaddr >= entry->end)) {
3086 /*
3087 * Entry was either not a valid hint, or the vaddr was not
3088 * contained in the entry, so do a full lookup.
3089 */
3090 if (!vm_map_lookup_entry(map, vaddr, out_entry))
3091 RETURN(KERN_INVALID_ADDRESS);
3092 
3093 entry = *out_entry;
3094 }
3095 
3096 /*
3097 * Handle submaps.
3098 */
3099 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3100 vm_map_t old_map = map;
3101 
3102 *var_map = map = entry->object.sub_map;
3103 vm_map_unlock_read(old_map);
3104 goto RetryLookup;
3105 }
3106 
3107 /*
3108 * Check whether this task is allowed to have this page.
3109 * Note the special case for MAP_ENTRY_COW
3110 * pages with an override. This is to implement a forced
3111 * COW for debuggers.
3112 */
3113 if (fault_type & VM_PROT_OVERRIDE_WRITE)
3114 prot = entry->max_protection;
3115 else
3116 prot = entry->protection;
3117 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3118 if ((fault_type & prot) != fault_type) {
3119 RETURN(KERN_PROTECTION_FAILURE);
3120 }
3121 if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3122 (entry->eflags & MAP_ENTRY_COW) &&
3123 (fault_type & VM_PROT_WRITE) &&
3124 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3125 RETURN(KERN_PROTECTION_FAILURE);
3126 }
3127 
3128 /*
3129 * If this page is not pageable, we have to get it for all possible
3130 * accesses.
3131 */
3132 *wired = (entry->wired_count != 0);
3133 if (*wired)
3134 prot = fault_type = entry->protection;
3135 
3136 /*
3137 * If the entry was copy-on-write, we either shadow the object now or demote the permissions allowed.
3138 */
3139 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3140 /*
3141 * If we want to write the page, we may as well handle that
3142 * now since we've got the map locked.
3143 *
3144 * If we don't need to write the page, we just demote the
3145 * permissions allowed.
3146 */
3147 if (fault_type & VM_PROT_WRITE) {
3148 /*
3149 * Make a new object, and place it in the object
3150 * chain. Note that no new references have appeared
3151 * -- one just moved from the map to the new
3152 * object.
3153 */
3154 if (vm_map_lock_upgrade(map))
3155 goto RetryLookup;
3156 
3157 vm_object_shadow(
3158 &entry->object.vm_object,
3159 &entry->offset,
3160 atop(entry->end - entry->start));
3161 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3162 
3163 vm_map_lock_downgrade(map);
3164 } else {
3165 /*
3166 * We're attempting to read a copy-on-write page --
3167 * don't allow writes.
3168 */
3169 prot &= ~VM_PROT_WRITE;
3170 }
3171 }
3172 
3173 /*
3174 * Create an object if necessary.
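 * (An anonymous OBJT_DEFAULT object is allocated so that the fault
 * code always has an object to page against; entries in system maps
 * are left without one.)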
3175 */ 3176 if (entry->object.vm_object == NULL && 3177 !map->system_map) { 3178 if (vm_map_lock_upgrade(map)) 3179 goto RetryLookup; 3180 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 3181 atop(entry->end - entry->start)); 3182 entry->offset = 0; 3183 vm_map_lock_downgrade(map); 3184 } 3185 3186 /* 3187 * Return the object/offset from this entry. If the entry was 3188 * copy-on-write or empty, it has been fixed up. 3189 */ 3190 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3191 *object = entry->object.vm_object; 3192 3193 *out_prot = prot; 3194 return (KERN_SUCCESS); 3195 3196 #undef RETURN 3197 } 3198 3199 /* 3200 * vm_map_lookup_locked: 3201 * 3202 * Lookup the faulting address. A version of vm_map_lookup that returns 3203 * KERN_FAILURE instead of blocking on map lock or memory allocation. 3204 */ 3205 int 3206 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 3207 vm_offset_t vaddr, 3208 vm_prot_t fault_typea, 3209 vm_map_entry_t *out_entry, /* OUT */ 3210 vm_object_t *object, /* OUT */ 3211 vm_pindex_t *pindex, /* OUT */ 3212 vm_prot_t *out_prot, /* OUT */ 3213 boolean_t *wired) /* OUT */ 3214 { 3215 vm_map_entry_t entry; 3216 vm_map_t map = *var_map; 3217 vm_prot_t prot; 3218 vm_prot_t fault_type = fault_typea; 3219 3220 /* 3221 * If the map has an interesting hint, try it before calling full 3222 * blown lookup routine. 3223 */ 3224 entry = map->root; 3225 *out_entry = entry; 3226 if (entry == NULL || 3227 (vaddr < entry->start) || (vaddr >= entry->end)) { 3228 /* 3229 * Entry was either not a valid hint, or the vaddr was not 3230 * contained in the entry, so do a full lookup. 3231 */ 3232 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 3233 return (KERN_INVALID_ADDRESS); 3234 3235 entry = *out_entry; 3236 } 3237 3238 /* 3239 * Fail if the entry refers to a submap. 3240 */ 3241 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 3242 return (KERN_FAILURE); 3243 3244 /* 3245 * Check whether this task is allowed to have this page. 3246 * Note the special case for MAP_ENTRY_COW 3247 * pages with an override. This is to implement a forced 3248 * COW for debuggers. 3249 */ 3250 if (fault_type & VM_PROT_OVERRIDE_WRITE) 3251 prot = entry->max_protection; 3252 else 3253 prot = entry->protection; 3254 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 3255 if ((fault_type & prot) != fault_type) 3256 return (KERN_PROTECTION_FAILURE); 3257 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3258 (entry->eflags & MAP_ENTRY_COW) && 3259 (fault_type & VM_PROT_WRITE) && 3260 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) 3261 return (KERN_PROTECTION_FAILURE); 3262 3263 /* 3264 * If this page is not pageable, we have to get it for all possible 3265 * accesses. 3266 */ 3267 *wired = (entry->wired_count != 0); 3268 if (*wired) 3269 prot = fault_type = entry->protection; 3270 3271 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3272 /* 3273 * Fail if the entry was copy-on-write for a write fault. 3274 */ 3275 if (fault_type & VM_PROT_WRITE) 3276 return (KERN_FAILURE); 3277 /* 3278 * We're attempting to read a copy-on-write page -- 3279 * don't allow writes. 3280 */ 3281 prot &= ~VM_PROT_WRITE; 3282 } 3283 3284 /* 3285 * Fail if an object should be created. 3286 */ 3287 if (entry->object.vm_object == NULL && !map->system_map) 3288 return (KERN_FAILURE); 3289 3290 /* 3291 * Return the object/offset from this entry. If the entry was 3292 * copy-on-write or empty, it has been fixed up. 
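 * (In this non-blocking variant the entry is never actually fixed up;
 * cases that would require it fail with KERN_FAILURE above.)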
3293 */ 3294 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3295 *object = entry->object.vm_object; 3296 3297 *out_prot = prot; 3298 return (KERN_SUCCESS); 3299 } 3300 3301 /* 3302 * vm_map_lookup_done: 3303 * 3304 * Releases locks acquired by a vm_map_lookup 3305 * (according to the handle returned by that lookup). 3306 */ 3307 void 3308 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 3309 { 3310 /* 3311 * Unlock the main-level map 3312 */ 3313 vm_map_unlock_read(map); 3314 } 3315 3316 #include "opt_ddb.h" 3317 #ifdef DDB 3318 #include <sys/kernel.h> 3319 3320 #include <ddb/ddb.h> 3321 3322 /* 3323 * vm_map_print: [ debug ] 3324 */ 3325 DB_SHOW_COMMAND(map, vm_map_print) 3326 { 3327 static int nlines; 3328 /* XXX convert args. */ 3329 vm_map_t map = (vm_map_t)addr; 3330 boolean_t full = have_addr; 3331 3332 vm_map_entry_t entry; 3333 3334 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3335 (void *)map, 3336 (void *)map->pmap, map->nentries, map->timestamp); 3337 nlines++; 3338 3339 if (!full && db_indent) 3340 return; 3341 3342 db_indent += 2; 3343 for (entry = map->header.next; entry != &map->header; 3344 entry = entry->next) { 3345 db_iprintf("map entry %p: start=%p, end=%p\n", 3346 (void *)entry, (void *)entry->start, (void *)entry->end); 3347 nlines++; 3348 { 3349 static char *inheritance_name[4] = 3350 {"share", "copy", "none", "donate_copy"}; 3351 3352 db_iprintf(" prot=%x/%x/%s", 3353 entry->protection, 3354 entry->max_protection, 3355 inheritance_name[(int)(unsigned char)entry->inheritance]); 3356 if (entry->wired_count != 0) 3357 db_printf(", wired"); 3358 } 3359 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3360 db_printf(", share=%p, offset=0x%jx\n", 3361 (void *)entry->object.sub_map, 3362 (uintmax_t)entry->offset); 3363 nlines++; 3364 if ((entry->prev == &map->header) || 3365 (entry->prev->object.sub_map != 3366 entry->object.sub_map)) { 3367 db_indent += 2; 3368 vm_map_print((db_expr_t)(intptr_t) 3369 entry->object.sub_map, 3370 full, 0, (char *)0); 3371 db_indent -= 2; 3372 } 3373 } else { 3374 db_printf(", object=%p, offset=0x%jx", 3375 (void *)entry->object.vm_object, 3376 (uintmax_t)entry->offset); 3377 if (entry->eflags & MAP_ENTRY_COW) 3378 db_printf(", copy (%s)", 3379 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3380 db_printf("\n"); 3381 nlines++; 3382 3383 if ((entry->prev == &map->header) || 3384 (entry->prev->object.vm_object != 3385 entry->object.vm_object)) { 3386 db_indent += 2; 3387 vm_object_print((db_expr_t)(intptr_t) 3388 entry->object.vm_object, 3389 full, 0, (char *)0); 3390 nlines += 4; 3391 db_indent -= 2; 3392 } 3393 } 3394 } 3395 db_indent -= 2; 3396 if (db_indent == 0) 3397 nlines = 0; 3398 } 3399 3400 3401 DB_SHOW_COMMAND(procvm, procvm) 3402 { 3403 struct proc *p; 3404 3405 if (have_addr) { 3406 p = (struct proc *) addr; 3407 } else { 3408 p = curproc; 3409 } 3410 3411 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3412 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3413 (void *)vmspace_pmap(p->p_vmspace)); 3414 3415 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3416 } 3417 3418 #endif /* DDB */ 3419