/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before
 * any other vm_map routines.
 *
 * Map and entry structures are allocated from the general
 * purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 * These restrictions are necessary since malloc() uses the
 * maps and requires map entries.
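 *
 * This is why, in the code below, kmapentzone is created with the
 * UMA_ZONE_VM flag, backed by the statically allocated kmapentobj
 * (see vm_init2()), and preallocated with uma_prealloc(): kernel map
 * entries can then be obtained without recursing into malloc().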
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static void vmspace_zinit(void *mem, int size);
static void vmspace_zfini(void *mem, int size);
static void vm_map_zinit(void *mem, int size);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(mapentzone, MAX_MAPENT);
}

static void
vmspace_zfini(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vmspace_zinit(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vm_map_zfini(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	mtx_destroy(&map->system_mtx);
	lockdestroy(&map->lock);
}

static void
vm_map_zinit(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	map->nentries = 0;
	map->size = 0;
	map->infork = 0;
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
	KASSERT(map->infork == 0,
	    ("map %p infork == %d on free.",
	    map, map->infork));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
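 *
 * A minimal usage sketch (hypothetical caller; the address bounds are
 * whatever range the caller wants, e.g. the user VA range):
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *	... initialize the remaining vmspace fields ...
 *	vmspace_free(vm);	releases the reference taken here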
 */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_exitingcnt = 0;
	return (vm);
}

void
vm_init2(void)
{
	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
	    maxproc * 2 + maxfiles);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	pmap_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	vm_map_lock(&vm->vm_map);
	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);
	vm_map_unlock(&vm->vm_map);

	pmap_release(vmspace_pmap(vm));
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
	GIANT_REQUIRED;

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	GIANT_REQUIRED;
	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
	 * may not be 0 (e.g. fork() and child exits without exec()ing).
	 * exitingcnt may increment above 0 and drop back down to zero
	 * several times while vm_refcnt is held non-zero.  vm_refcnt
	 * may also increment above 0 and drop back down to zero several
	 * times while vm_exitingcnt is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
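	 *
	 * In short: the vmspace is destroyed only once both vm_refcnt and
	 * vm_exitingcnt have dropped to zero, and whichever of
	 * vmspace_free() or vmspace_exitfree() observes the second count
	 * reaching zero is the one that calls vmspace_dofree().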
341 */ 342 if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0) 343 vmspace_dofree(vm); 344 } 345 346 void 347 _vm_map_lock(vm_map_t map, const char *file, int line) 348 { 349 int error; 350 351 if (map->system_map) 352 _mtx_lock_flags(&map->system_mtx, 0, file, line); 353 else { 354 error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread); 355 KASSERT(error == 0, ("%s: failed to get lock", __func__)); 356 } 357 map->timestamp++; 358 } 359 360 void 361 _vm_map_unlock(vm_map_t map, const char *file, int line) 362 { 363 364 if (map->system_map) 365 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 366 else 367 lockmgr(&map->lock, LK_RELEASE, NULL, curthread); 368 } 369 370 void 371 _vm_map_lock_read(vm_map_t map, const char *file, int line) 372 { 373 int error; 374 375 if (map->system_map) 376 _mtx_lock_flags(&map->system_mtx, 0, file, line); 377 else { 378 error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread); 379 KASSERT(error == 0, ("%s: failed to get lock", __func__)); 380 } 381 } 382 383 void 384 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 385 { 386 387 if (map->system_map) 388 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 389 else 390 lockmgr(&map->lock, LK_RELEASE, NULL, curthread); 391 } 392 393 int 394 _vm_map_trylock(vm_map_t map, const char *file, int line) 395 { 396 int error; 397 398 error = map->system_map ? 399 !_mtx_trylock(&map->system_mtx, 0, file, line) : 400 lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread); 401 if (error == 0) 402 map->timestamp++; 403 return (error == 0); 404 } 405 406 int 407 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 408 { 409 int error; 410 411 error = map->system_map ? 412 !_mtx_trylock(&map->system_mtx, 0, file, line) : 413 lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread); 414 return (error == 0); 415 } 416 417 int 418 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 419 { 420 421 if (map->system_map) { 422 #ifdef INVARIANTS 423 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 424 #endif 425 } else 426 KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE, 427 ("%s: lock not held", __func__)); 428 map->timestamp++; 429 return (0); 430 } 431 432 void 433 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 434 { 435 436 if (map->system_map) { 437 #ifdef INVARIANTS 438 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 439 #endif 440 } else 441 KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE, 442 ("%s: lock not held", __func__)); 443 } 444 445 /* 446 * vm_map_unlock_and_wait: 447 */ 448 int 449 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait) 450 { 451 452 mtx_lock(&map_sleep_mtx); 453 vm_map_unlock(map); 454 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0)); 455 } 456 457 /* 458 * vm_map_wakeup: 459 */ 460 void 461 vm_map_wakeup(vm_map_t map) 462 { 463 464 /* 465 * Acquire and release map_sleep_mtx to prevent a wakeup() 466 * from being performed (and lost) between the vm_map_unlock() 467 * and the msleep() in vm_map_unlock_and_wait(). 
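	 *
	 * The resulting handshake, sketched from the two routines
	 * (sleeper on the left, waker on the right):
	 *
	 *	mtx_lock(&map_sleep_mtx)
	 *	vm_map_unlock(map)
	 *					mtx_lock(&map_sleep_mtx)  blocks
	 *	msleep(&map->root, ... PDROP)
	 *					mtx_unlock(&map_sleep_mtx)
	 *					wakeup(&map->root)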
468 */ 469 mtx_lock(&map_sleep_mtx); 470 mtx_unlock(&map_sleep_mtx); 471 wakeup(&map->root); 472 } 473 474 long 475 vmspace_resident_count(struct vmspace *vmspace) 476 { 477 return pmap_resident_count(vmspace_pmap(vmspace)); 478 } 479 480 long 481 vmspace_wired_count(struct vmspace *vmspace) 482 { 483 return pmap_wired_count(vmspace_pmap(vmspace)); 484 } 485 486 /* 487 * vm_map_create: 488 * 489 * Creates and returns a new empty VM map with 490 * the given physical map structure, and having 491 * the given lower and upper address bounds. 492 */ 493 vm_map_t 494 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 495 { 496 vm_map_t result; 497 498 result = uma_zalloc(mapzone, M_WAITOK); 499 CTR1(KTR_VM, "vm_map_create: %p", result); 500 _vm_map_init(result, min, max); 501 result->pmap = pmap; 502 return (result); 503 } 504 505 /* 506 * Initialize an existing vm_map structure 507 * such as that in the vmspace structure. 508 * The pmap is set elsewhere. 509 */ 510 static void 511 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 512 { 513 514 map->header.next = map->header.prev = &map->header; 515 map->needs_wakeup = FALSE; 516 map->system_map = 0; 517 map->min_offset = min; 518 map->max_offset = max; 519 map->first_free = &map->header; 520 map->root = NULL; 521 map->timestamp = 0; 522 } 523 524 void 525 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 526 { 527 _vm_map_init(map, min, max); 528 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 529 lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE); 530 } 531 532 /* 533 * vm_map_entry_dispose: [ internal use only ] 534 * 535 * Inverse of vm_map_entry_create. 536 */ 537 static void 538 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 539 { 540 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 541 } 542 543 /* 544 * vm_map_entry_create: [ internal use only ] 545 * 546 * Allocates a VM map entry for insertion. 547 * No entry fields are filled in. 548 */ 549 static vm_map_entry_t 550 vm_map_entry_create(vm_map_t map) 551 { 552 vm_map_entry_t new_entry; 553 554 if (map->system_map) 555 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 556 else 557 new_entry = uma_zalloc(mapentzone, M_WAITOK); 558 if (new_entry == NULL) 559 panic("vm_map_entry_create: kernel resources exhausted"); 560 return (new_entry); 561 } 562 563 /* 564 * vm_map_entry_set_behavior: 565 * 566 * Set the expected access behavior, either normal, random, or 567 * sequential. 568 */ 569 static __inline void 570 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 571 { 572 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 573 (behavior & MAP_ENTRY_BEHAV_MASK); 574 } 575 576 /* 577 * vm_map_entry_splay: 578 * 579 * Implements Sleator and Tarjan's top-down splay algorithm. Returns 580 * the vm_map_entry containing the given address. If, however, that 581 * address is not found in the vm_map, returns a vm_map_entry that is 582 * adjacent to the address, coming before or after it. 583 */ 584 static vm_map_entry_t 585 vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root) 586 { 587 struct vm_map_entry dummy; 588 vm_map_entry_t lefttreemax, righttreemin, y; 589 590 if (root == NULL) 591 return (root); 592 lefttreemax = righttreemin = &dummy; 593 for (;; root = y) { 594 if (address < root->start) { 595 if ((y = root->left) == NULL) 596 break; 597 if (address < y->start) { 598 /* Rotate right. 
*/ 599 root->left = y->right; 600 y->right = root; 601 root = y; 602 if ((y = root->left) == NULL) 603 break; 604 } 605 /* Link into the new root's right tree. */ 606 righttreemin->left = root; 607 righttreemin = root; 608 } else if (address >= root->end) { 609 if ((y = root->right) == NULL) 610 break; 611 if (address >= y->end) { 612 /* Rotate left. */ 613 root->right = y->left; 614 y->left = root; 615 root = y; 616 if ((y = root->right) == NULL) 617 break; 618 } 619 /* Link into the new root's left tree. */ 620 lefttreemax->right = root; 621 lefttreemax = root; 622 } else 623 break; 624 } 625 /* Assemble the new root. */ 626 lefttreemax->right = root->left; 627 righttreemin->left = root->right; 628 root->left = dummy.right; 629 root->right = dummy.left; 630 return (root); 631 } 632 633 /* 634 * vm_map_entry_{un,}link: 635 * 636 * Insert/remove entries from maps. 637 */ 638 static void 639 vm_map_entry_link(vm_map_t map, 640 vm_map_entry_t after_where, 641 vm_map_entry_t entry) 642 { 643 644 CTR4(KTR_VM, 645 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 646 map->nentries, entry, after_where); 647 map->nentries++; 648 entry->prev = after_where; 649 entry->next = after_where->next; 650 entry->next->prev = entry; 651 after_where->next = entry; 652 653 if (after_where != &map->header) { 654 if (after_where != map->root) 655 vm_map_entry_splay(after_where->start, map->root); 656 entry->right = after_where->right; 657 entry->left = after_where; 658 after_where->right = NULL; 659 } else { 660 entry->right = map->root; 661 entry->left = NULL; 662 } 663 map->root = entry; 664 } 665 666 static void 667 vm_map_entry_unlink(vm_map_t map, 668 vm_map_entry_t entry) 669 { 670 vm_map_entry_t next, prev, root; 671 672 if (entry != map->root) 673 vm_map_entry_splay(entry->start, map->root); 674 if (entry->left == NULL) 675 root = entry->right; 676 else { 677 root = vm_map_entry_splay(entry->start, entry->left); 678 root->right = entry->right; 679 } 680 map->root = root; 681 682 prev = entry->prev; 683 next = entry->next; 684 next->prev = prev; 685 prev->next = next; 686 map->nentries--; 687 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 688 map->nentries, entry); 689 } 690 691 /* 692 * vm_map_lookup_entry: [ internal use only ] 693 * 694 * Finds the map entry containing (or 695 * immediately preceding) the specified address 696 * in the given map; the entry is returned 697 * in the "entry" parameter. The boolean 698 * result indicates whether the address is 699 * actually contained in the map. 700 */ 701 boolean_t 702 vm_map_lookup_entry( 703 vm_map_t map, 704 vm_offset_t address, 705 vm_map_entry_t *entry) /* OUT */ 706 { 707 vm_map_entry_t cur; 708 709 cur = vm_map_entry_splay(address, map->root); 710 if (cur == NULL) 711 *entry = &map->header; 712 else { 713 map->root = cur; 714 715 if (address >= cur->start) { 716 *entry = cur; 717 if (cur->end > address) 718 return (TRUE); 719 } else 720 *entry = cur->prev; 721 } 722 return (FALSE); 723 } 724 725 /* 726 * vm_map_insert: 727 * 728 * Inserts the given whole VM object into the target 729 * map at the specified address range. The object's 730 * size should match that of the address range. 731 * 732 * Requires that the map be locked, and leaves it so. 733 * 734 * If object is non-NULL, ref count must be bumped by caller 735 * prior to making call to account for the new entry. 
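 *
 * A typical call, sketched (hypothetical caller that already holds the
 * map lock and has taken its own reference on the object):
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, object, offset, start, start + size,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);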
736 */ 737 int 738 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 739 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 740 int cow) 741 { 742 vm_map_entry_t new_entry; 743 vm_map_entry_t prev_entry; 744 vm_map_entry_t temp_entry; 745 vm_eflags_t protoeflags; 746 747 /* 748 * Check that the start and end points are not bogus. 749 */ 750 if ((start < map->min_offset) || (end > map->max_offset) || 751 (start >= end)) 752 return (KERN_INVALID_ADDRESS); 753 754 /* 755 * Find the entry prior to the proposed starting address; if it's part 756 * of an existing entry, this range is bogus. 757 */ 758 if (vm_map_lookup_entry(map, start, &temp_entry)) 759 return (KERN_NO_SPACE); 760 761 prev_entry = temp_entry; 762 763 /* 764 * Assert that the next entry doesn't overlap the end point. 765 */ 766 if ((prev_entry->next != &map->header) && 767 (prev_entry->next->start < end)) 768 return (KERN_NO_SPACE); 769 770 protoeflags = 0; 771 772 if (cow & MAP_COPY_ON_WRITE) 773 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 774 775 if (cow & MAP_NOFAULT) { 776 protoeflags |= MAP_ENTRY_NOFAULT; 777 778 KASSERT(object == NULL, 779 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 780 } 781 if (cow & MAP_DISABLE_SYNCER) 782 protoeflags |= MAP_ENTRY_NOSYNC; 783 if (cow & MAP_DISABLE_COREDUMP) 784 protoeflags |= MAP_ENTRY_NOCOREDUMP; 785 786 if (object != NULL) { 787 /* 788 * OBJ_ONEMAPPING must be cleared unless this mapping 789 * is trivially proven to be the only mapping for any 790 * of the object's pages. (Object granularity 791 * reference counting is insufficient to recognize 792 * aliases with precision.) 793 */ 794 VM_OBJECT_LOCK(object); 795 if (object->ref_count > 1 || object->shadow_count != 0) 796 vm_object_clear_flag(object, OBJ_ONEMAPPING); 797 VM_OBJECT_UNLOCK(object); 798 } 799 else if ((prev_entry != &map->header) && 800 (prev_entry->eflags == protoeflags) && 801 (prev_entry->end == start) && 802 (prev_entry->wired_count == 0) && 803 ((prev_entry->object.vm_object == NULL) || 804 vm_object_coalesce(prev_entry->object.vm_object, 805 OFF_TO_IDX(prev_entry->offset), 806 (vm_size_t)(prev_entry->end - prev_entry->start), 807 (vm_size_t)(end - prev_entry->end)))) { 808 /* 809 * We were able to extend the object. Determine if we 810 * can extend the previous map entry to include the 811 * new range as well. 812 */ 813 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 814 (prev_entry->protection == prot) && 815 (prev_entry->max_protection == max)) { 816 map->size += (end - prev_entry->end); 817 prev_entry->end = end; 818 vm_map_simplify_entry(map, prev_entry); 819 return (KERN_SUCCESS); 820 } 821 822 /* 823 * If we can extend the object but cannot extend the 824 * map entry, we have to create a new map entry. We 825 * must bump the ref count on the extended object to 826 * account for it. object may be NULL. 827 */ 828 object = prev_entry->object.vm_object; 829 offset = prev_entry->offset + 830 (prev_entry->end - prev_entry->start); 831 vm_object_reference(object); 832 } 833 834 /* 835 * NOTE: if conditionals fail, object can be NULL here. This occurs 836 * in things like the buffer map where we manage kva but do not manage 837 * backing objects. 
838 */ 839 840 /* 841 * Create a new entry 842 */ 843 new_entry = vm_map_entry_create(map); 844 new_entry->start = start; 845 new_entry->end = end; 846 847 new_entry->eflags = protoeflags; 848 new_entry->object.vm_object = object; 849 new_entry->offset = offset; 850 new_entry->avail_ssize = 0; 851 852 new_entry->inheritance = VM_INHERIT_DEFAULT; 853 new_entry->protection = prot; 854 new_entry->max_protection = max; 855 new_entry->wired_count = 0; 856 857 /* 858 * Insert the new entry into the list 859 */ 860 vm_map_entry_link(map, prev_entry, new_entry); 861 map->size += new_entry->end - new_entry->start; 862 863 /* 864 * Update the free space hint 865 */ 866 if ((map->first_free == prev_entry) && 867 (prev_entry->end >= new_entry->start)) { 868 map->first_free = new_entry; 869 } 870 871 #if 0 872 /* 873 * Temporarily removed to avoid MAP_STACK panic, due to 874 * MAP_STACK being a huge hack. Will be added back in 875 * when MAP_STACK (and the user stack mapping) is fixed. 876 */ 877 /* 878 * It may be possible to simplify the entry 879 */ 880 vm_map_simplify_entry(map, new_entry); 881 #endif 882 883 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 884 vm_map_pmap_enter(map, start, 885 object, OFF_TO_IDX(offset), end - start, 886 cow & MAP_PREFAULT_PARTIAL); 887 } 888 889 return (KERN_SUCCESS); 890 } 891 892 /* 893 * Find sufficient space for `length' bytes in the given map, starting at 894 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 895 */ 896 int 897 vm_map_findspace( 898 vm_map_t map, 899 vm_offset_t start, 900 vm_size_t length, 901 vm_offset_t *addr) 902 { 903 vm_map_entry_t entry, next; 904 vm_offset_t end; 905 906 if (start < map->min_offset) 907 start = map->min_offset; 908 if (start > map->max_offset) 909 return (1); 910 911 /* 912 * Look for the first possible address; if there's already something 913 * at this address, we have to start after it. 914 */ 915 if (start == map->min_offset) { 916 if ((entry = map->first_free) != &map->header) 917 start = entry->end; 918 } else { 919 vm_map_entry_t tmp; 920 921 if (vm_map_lookup_entry(map, start, &tmp)) 922 start = tmp->end; 923 entry = tmp; 924 } 925 926 /* 927 * Look through the rest of the map, trying to fit a new region in the 928 * gap between existing regions, or after the very last region. 929 */ 930 for (;; start = (entry = next)->end) { 931 /* 932 * Find the end of the proposed new region. Be sure we didn't 933 * go beyond the end of the map, or wrap around the address; 934 * if so, we lose. Otherwise, if this is the last entry, or 935 * if the proposed new region fits before the next entry, we 936 * win. 937 */ 938 end = start + length; 939 if (end > map->max_offset || end < start) 940 return (1); 941 next = entry->next; 942 if (next == &map->header || next->start >= end) 943 break; 944 } 945 *addr = start; 946 if (map == kernel_map) { 947 vm_offset_t ksize; 948 if ((ksize = round_page(start + length)) > kernel_vm_end) { 949 pmap_growkernel(ksize); 950 } 951 } 952 return (0); 953 } 954 955 /* 956 * vm_map_find finds an unallocated region in the target address 957 * map with the given length. The search is defined to be 958 * first-fit from the specified address; the region found is 959 * returned in the same parameter. 960 * 961 * If object is non-NULL, ref count must be bumped by caller 962 * prior to making call to account for the new entry. 
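 *
 * Unlike vm_map_insert(), this routine takes the map lock itself and,
 * when find_space is TRUE, calls vm_map_findspace() first to choose the
 * address.  A sketch of a common anonymous-memory style call:
 *
 *	addr = 0;
 *	rv = vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);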
963 */ 964 int 965 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 966 vm_offset_t *addr, /* IN/OUT */ 967 vm_size_t length, boolean_t find_space, vm_prot_t prot, 968 vm_prot_t max, int cow) 969 { 970 vm_offset_t start; 971 int result, s = 0; 972 973 start = *addr; 974 975 if (map == kmem_map) 976 s = splvm(); 977 978 vm_map_lock(map); 979 if (find_space) { 980 if (vm_map_findspace(map, start, length, addr)) { 981 vm_map_unlock(map); 982 if (map == kmem_map) 983 splx(s); 984 return (KERN_NO_SPACE); 985 } 986 start = *addr; 987 } 988 result = vm_map_insert(map, object, offset, 989 start, start + length, prot, max, cow); 990 vm_map_unlock(map); 991 992 if (map == kmem_map) 993 splx(s); 994 995 return (result); 996 } 997 998 /* 999 * vm_map_simplify_entry: 1000 * 1001 * Simplify the given map entry by merging with either neighbor. This 1002 * routine also has the ability to merge with both neighbors. 1003 * 1004 * The map must be locked. 1005 * 1006 * This routine guarentees that the passed entry remains valid (though 1007 * possibly extended). When merging, this routine may delete one or 1008 * both neighbors. 1009 */ 1010 void 1011 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) 1012 { 1013 vm_map_entry_t next, prev; 1014 vm_size_t prevsize, esize; 1015 1016 if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) 1017 return; 1018 1019 prev = entry->prev; 1020 if (prev != &map->header) { 1021 prevsize = prev->end - prev->start; 1022 if ( (prev->end == entry->start) && 1023 (prev->object.vm_object == entry->object.vm_object) && 1024 (!prev->object.vm_object || 1025 (prev->offset + prevsize == entry->offset)) && 1026 (prev->eflags == entry->eflags) && 1027 (prev->protection == entry->protection) && 1028 (prev->max_protection == entry->max_protection) && 1029 (prev->inheritance == entry->inheritance) && 1030 (prev->wired_count == entry->wired_count)) { 1031 if (map->first_free == prev) 1032 map->first_free = entry; 1033 vm_map_entry_unlink(map, prev); 1034 entry->start = prev->start; 1035 entry->offset = prev->offset; 1036 if (prev->object.vm_object) 1037 vm_object_deallocate(prev->object.vm_object); 1038 vm_map_entry_dispose(map, prev); 1039 } 1040 } 1041 1042 next = entry->next; 1043 if (next != &map->header) { 1044 esize = entry->end - entry->start; 1045 if ((entry->end == next->start) && 1046 (next->object.vm_object == entry->object.vm_object) && 1047 (!entry->object.vm_object || 1048 (entry->offset + esize == next->offset)) && 1049 (next->eflags == entry->eflags) && 1050 (next->protection == entry->protection) && 1051 (next->max_protection == entry->max_protection) && 1052 (next->inheritance == entry->inheritance) && 1053 (next->wired_count == entry->wired_count)) { 1054 if (map->first_free == next) 1055 map->first_free = entry; 1056 vm_map_entry_unlink(map, next); 1057 entry->end = next->end; 1058 if (next->object.vm_object) 1059 vm_object_deallocate(next->object.vm_object); 1060 vm_map_entry_dispose(map, next); 1061 } 1062 } 1063 } 1064 /* 1065 * vm_map_clip_start: [ internal use only ] 1066 * 1067 * Asserts that the given entry begins at or after 1068 * the specified address; if necessary, 1069 * it splits the entry into two. 1070 */ 1071 #define vm_map_clip_start(map, entry, startaddr) \ 1072 { \ 1073 if (startaddr > entry->start) \ 1074 _vm_map_clip_start(map, entry, startaddr); \ 1075 } 1076 1077 /* 1078 * This routine is called only when it is known that 1079 * the entry must be split. 
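 *
 * For example (hypothetical addresses), clipping an entry covering
 * [0x2000, 0x6000) at start address 0x3000 leaves two entries,
 * [0x2000, 0x3000) and [0x3000, 0x6000); the original entry structure
 * keeps the upper half, with its offset advanced by 0x1000 so the same
 * object pages remain mapped at the same addresses.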
1080 */ 1081 static void 1082 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 1083 { 1084 vm_map_entry_t new_entry; 1085 1086 /* 1087 * Split off the front portion -- note that we must insert the new 1088 * entry BEFORE this one, so that this entry has the specified 1089 * starting address. 1090 */ 1091 vm_map_simplify_entry(map, entry); 1092 1093 /* 1094 * If there is no object backing this entry, we might as well create 1095 * one now. If we defer it, an object can get created after the map 1096 * is clipped, and individual objects will be created for the split-up 1097 * map. This is a bit of a hack, but is also about the best place to 1098 * put this improvement. 1099 */ 1100 if (entry->object.vm_object == NULL && !map->system_map) { 1101 vm_object_t object; 1102 object = vm_object_allocate(OBJT_DEFAULT, 1103 atop(entry->end - entry->start)); 1104 entry->object.vm_object = object; 1105 entry->offset = 0; 1106 } 1107 1108 new_entry = vm_map_entry_create(map); 1109 *new_entry = *entry; 1110 1111 new_entry->end = start; 1112 entry->offset += (start - entry->start); 1113 entry->start = start; 1114 1115 vm_map_entry_link(map, entry->prev, new_entry); 1116 1117 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1118 vm_object_reference(new_entry->object.vm_object); 1119 } 1120 } 1121 1122 /* 1123 * vm_map_clip_end: [ internal use only ] 1124 * 1125 * Asserts that the given entry ends at or before 1126 * the specified address; if necessary, 1127 * it splits the entry into two. 1128 */ 1129 #define vm_map_clip_end(map, entry, endaddr) \ 1130 { \ 1131 if ((endaddr) < (entry->end)) \ 1132 _vm_map_clip_end((map), (entry), (endaddr)); \ 1133 } 1134 1135 /* 1136 * This routine is called only when it is known that 1137 * the entry must be split. 1138 */ 1139 static void 1140 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 1141 { 1142 vm_map_entry_t new_entry; 1143 1144 /* 1145 * If there is no object backing this entry, we might as well create 1146 * one now. If we defer it, an object can get created after the map 1147 * is clipped, and individual objects will be created for the split-up 1148 * map. This is a bit of a hack, but is also about the best place to 1149 * put this improvement. 1150 */ 1151 if (entry->object.vm_object == NULL && !map->system_map) { 1152 vm_object_t object; 1153 object = vm_object_allocate(OBJT_DEFAULT, 1154 atop(entry->end - entry->start)); 1155 entry->object.vm_object = object; 1156 entry->offset = 0; 1157 } 1158 1159 /* 1160 * Create a new entry and insert it AFTER the specified entry 1161 */ 1162 new_entry = vm_map_entry_create(map); 1163 *new_entry = *entry; 1164 1165 new_entry->start = entry->end = end; 1166 new_entry->offset += (end - entry->start); 1167 1168 vm_map_entry_link(map, entry, new_entry); 1169 1170 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1171 vm_object_reference(new_entry->object.vm_object); 1172 } 1173 } 1174 1175 /* 1176 * VM_MAP_RANGE_CHECK: [ internal use only ] 1177 * 1178 * Asserts that the starting and ending region 1179 * addresses fall within the valid range of the map. 1180 */ 1181 #define VM_MAP_RANGE_CHECK(map, start, end) \ 1182 { \ 1183 if (start < vm_map_min(map)) \ 1184 start = vm_map_min(map); \ 1185 if (end > vm_map_max(map)) \ 1186 end = vm_map_max(map); \ 1187 if (start > end) \ 1188 start = end; \ 1189 } 1190 1191 /* 1192 * vm_map_submap: [ kernel use only ] 1193 * 1194 * Mark the given range as handled by a subordinate map. 
1195 * 1196 * This range must have been created with vm_map_find, 1197 * and no other operations may have been performed on this 1198 * range prior to calling vm_map_submap. 1199 * 1200 * Only a limited number of operations can be performed 1201 * within this rage after calling vm_map_submap: 1202 * vm_fault 1203 * [Don't try vm_map_copy!] 1204 * 1205 * To remove a submapping, one must first remove the 1206 * range from the superior map, and then destroy the 1207 * submap (if desired). [Better yet, don't try it.] 1208 */ 1209 int 1210 vm_map_submap( 1211 vm_map_t map, 1212 vm_offset_t start, 1213 vm_offset_t end, 1214 vm_map_t submap) 1215 { 1216 vm_map_entry_t entry; 1217 int result = KERN_INVALID_ARGUMENT; 1218 1219 vm_map_lock(map); 1220 1221 VM_MAP_RANGE_CHECK(map, start, end); 1222 1223 if (vm_map_lookup_entry(map, start, &entry)) { 1224 vm_map_clip_start(map, entry, start); 1225 } else 1226 entry = entry->next; 1227 1228 vm_map_clip_end(map, entry, end); 1229 1230 if ((entry->start == start) && (entry->end == end) && 1231 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1232 (entry->object.vm_object == NULL)) { 1233 entry->object.sub_map = submap; 1234 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1235 result = KERN_SUCCESS; 1236 } 1237 vm_map_unlock(map); 1238 1239 return (result); 1240 } 1241 1242 /* 1243 * The maximum number of pages to map 1244 */ 1245 #define MAX_INIT_PT 96 1246 1247 /* 1248 * vm_map_pmap_enter: 1249 * 1250 * Preload the mappings for the given object into the specified 1251 * map. This eliminates the soft faults on process startup and 1252 * immediately after an mmap(2). 1253 */ 1254 void 1255 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, 1256 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1257 { 1258 vm_offset_t tmpidx; 1259 int psize; 1260 vm_page_t p, mpte; 1261 1262 if (object == NULL) 1263 return; 1264 mtx_lock(&Giant); 1265 VM_OBJECT_LOCK(object); 1266 if (object->type == OBJT_DEVICE) { 1267 pmap_object_init_pt(map->pmap, addr, object, pindex, size); 1268 goto unlock_return; 1269 } 1270 1271 psize = atop(size); 1272 1273 if (object->type != OBJT_VNODE || 1274 ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 1275 (object->resident_page_count > MAX_INIT_PT))) { 1276 goto unlock_return; 1277 } 1278 1279 if (psize + pindex > object->size) { 1280 if (object->size < pindex) 1281 goto unlock_return; 1282 psize = object->size - pindex; 1283 } 1284 1285 mpte = NULL; 1286 1287 if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 1288 if (p->pindex < pindex) { 1289 p = vm_page_splay(pindex, object->root); 1290 if ((object->root = p)->pindex < pindex) 1291 p = TAILQ_NEXT(p, listq); 1292 } 1293 } 1294 /* 1295 * Assert: the variable p is either (1) the page with the 1296 * least pindex greater than or equal to the parameter pindex 1297 * or (2) NULL. 1298 */ 1299 for (; 1300 p != NULL && (tmpidx = p->pindex - pindex) < psize; 1301 p = TAILQ_NEXT(p, listq)) { 1302 /* 1303 * don't allow an madvise to blow away our really 1304 * free pages allocating pv entries. 
1305 */ 1306 if ((flags & MAP_PREFAULT_MADVISE) && 1307 cnt.v_free_count < cnt.v_free_reserved) { 1308 break; 1309 } 1310 vm_page_lock_queues(); 1311 if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && 1312 (p->busy == 0) && 1313 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1314 if ((p->queue - p->pc) == PQ_CACHE) 1315 vm_page_deactivate(p); 1316 vm_page_busy(p); 1317 vm_page_unlock_queues(); 1318 VM_OBJECT_UNLOCK(object); 1319 mpte = pmap_enter_quick(map->pmap, 1320 addr + ptoa(tmpidx), p, mpte); 1321 VM_OBJECT_LOCK(object); 1322 vm_page_lock_queues(); 1323 vm_page_wakeup(p); 1324 } 1325 vm_page_unlock_queues(); 1326 } 1327 unlock_return: 1328 VM_OBJECT_UNLOCK(object); 1329 mtx_unlock(&Giant); 1330 } 1331 1332 /* 1333 * vm_map_protect: 1334 * 1335 * Sets the protection of the specified address 1336 * region in the target map. If "set_max" is 1337 * specified, the maximum protection is to be set; 1338 * otherwise, only the current protection is affected. 1339 */ 1340 int 1341 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1342 vm_prot_t new_prot, boolean_t set_max) 1343 { 1344 vm_map_entry_t current; 1345 vm_map_entry_t entry; 1346 1347 vm_map_lock(map); 1348 1349 VM_MAP_RANGE_CHECK(map, start, end); 1350 1351 if (vm_map_lookup_entry(map, start, &entry)) { 1352 vm_map_clip_start(map, entry, start); 1353 } else { 1354 entry = entry->next; 1355 } 1356 1357 /* 1358 * Make a first pass to check for protection violations. 1359 */ 1360 current = entry; 1361 while ((current != &map->header) && (current->start < end)) { 1362 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1363 vm_map_unlock(map); 1364 return (KERN_INVALID_ARGUMENT); 1365 } 1366 if ((new_prot & current->max_protection) != new_prot) { 1367 vm_map_unlock(map); 1368 return (KERN_PROTECTION_FAILURE); 1369 } 1370 current = current->next; 1371 } 1372 1373 /* 1374 * Go back and fix up protections. [Note that clipping is not 1375 * necessary the second time.] 1376 */ 1377 current = entry; 1378 while ((current != &map->header) && (current->start < end)) { 1379 vm_prot_t old_prot; 1380 1381 vm_map_clip_end(map, current, end); 1382 1383 old_prot = current->protection; 1384 if (set_max) 1385 current->protection = 1386 (current->max_protection = new_prot) & 1387 old_prot; 1388 else 1389 current->protection = new_prot; 1390 1391 /* 1392 * Update physical map if necessary. Worry about copy-on-write 1393 * here -- CHECK THIS XXX 1394 */ 1395 if (current->protection != old_prot) { 1396 mtx_lock(&Giant); 1397 vm_page_lock_queues(); 1398 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1399 VM_PROT_ALL) 1400 pmap_protect(map->pmap, current->start, 1401 current->end, 1402 current->protection & MASK(current)); 1403 #undef MASK 1404 vm_page_unlock_queues(); 1405 mtx_unlock(&Giant); 1406 } 1407 vm_map_simplify_entry(map, current); 1408 current = current->next; 1409 } 1410 vm_map_unlock(map); 1411 return (KERN_SUCCESS); 1412 } 1413 1414 /* 1415 * vm_map_madvise: 1416 * 1417 * This routine traverses a processes map handling the madvise 1418 * system call. Advisories are classified as either those effecting 1419 * the vm_map_entry structure, or those effecting the underlying 1420 * objects. 
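 *
 * Concretely (from the switch below): MADV_NORMAL, MADV_SEQUENTIAL,
 * MADV_RANDOM, MADV_NOSYNC, MADV_AUTOSYNC, MADV_NOCORE and MADV_CORE
 * modify the map entries themselves and take the exclusive map lock,
 * while MADV_WILLNEED, MADV_DONTNEED and MADV_FREE are passed down to
 * the backing objects under a read lock.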
1421 */ 1422 int 1423 vm_map_madvise( 1424 vm_map_t map, 1425 vm_offset_t start, 1426 vm_offset_t end, 1427 int behav) 1428 { 1429 vm_map_entry_t current, entry; 1430 int modify_map = 0; 1431 1432 /* 1433 * Some madvise calls directly modify the vm_map_entry, in which case 1434 * we need to use an exclusive lock on the map and we need to perform 1435 * various clipping operations. Otherwise we only need a read-lock 1436 * on the map. 1437 */ 1438 switch(behav) { 1439 case MADV_NORMAL: 1440 case MADV_SEQUENTIAL: 1441 case MADV_RANDOM: 1442 case MADV_NOSYNC: 1443 case MADV_AUTOSYNC: 1444 case MADV_NOCORE: 1445 case MADV_CORE: 1446 modify_map = 1; 1447 vm_map_lock(map); 1448 break; 1449 case MADV_WILLNEED: 1450 case MADV_DONTNEED: 1451 case MADV_FREE: 1452 vm_map_lock_read(map); 1453 break; 1454 default: 1455 return (KERN_INVALID_ARGUMENT); 1456 } 1457 1458 /* 1459 * Locate starting entry and clip if necessary. 1460 */ 1461 VM_MAP_RANGE_CHECK(map, start, end); 1462 1463 if (vm_map_lookup_entry(map, start, &entry)) { 1464 if (modify_map) 1465 vm_map_clip_start(map, entry, start); 1466 } else { 1467 entry = entry->next; 1468 } 1469 1470 if (modify_map) { 1471 /* 1472 * madvise behaviors that are implemented in the vm_map_entry. 1473 * 1474 * We clip the vm_map_entry so that behavioral changes are 1475 * limited to the specified address range. 1476 */ 1477 for (current = entry; 1478 (current != &map->header) && (current->start < end); 1479 current = current->next 1480 ) { 1481 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1482 continue; 1483 1484 vm_map_clip_end(map, current, end); 1485 1486 switch (behav) { 1487 case MADV_NORMAL: 1488 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1489 break; 1490 case MADV_SEQUENTIAL: 1491 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1492 break; 1493 case MADV_RANDOM: 1494 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1495 break; 1496 case MADV_NOSYNC: 1497 current->eflags |= MAP_ENTRY_NOSYNC; 1498 break; 1499 case MADV_AUTOSYNC: 1500 current->eflags &= ~MAP_ENTRY_NOSYNC; 1501 break; 1502 case MADV_NOCORE: 1503 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1504 break; 1505 case MADV_CORE: 1506 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1507 break; 1508 default: 1509 break; 1510 } 1511 vm_map_simplify_entry(map, current); 1512 } 1513 vm_map_unlock(map); 1514 } else { 1515 vm_pindex_t pindex; 1516 int count; 1517 1518 /* 1519 * madvise behaviors that are implemented in the underlying 1520 * vm_object. 1521 * 1522 * Since we don't clip the vm_map_entry, we have to clip 1523 * the vm_object pindex and count. 
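		 *
		 * Worked example (hypothetical numbers): if an entry maps
		 * object pages [0, 16) and the madvise range covers only the
		 * last four pages of the entry, pindex is advanced by 12 and
		 * count reduced to 4, so only those four object pages are
		 * handed to vm_object_madvise() below.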
1524 */ 1525 for (current = entry; 1526 (current != &map->header) && (current->start < end); 1527 current = current->next 1528 ) { 1529 vm_offset_t useStart; 1530 1531 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1532 continue; 1533 1534 pindex = OFF_TO_IDX(current->offset); 1535 count = atop(current->end - current->start); 1536 useStart = current->start; 1537 1538 if (current->start < start) { 1539 pindex += atop(start - current->start); 1540 count -= atop(start - current->start); 1541 useStart = start; 1542 } 1543 if (current->end > end) 1544 count -= atop(current->end - end); 1545 1546 if (count <= 0) 1547 continue; 1548 1549 vm_object_madvise(current->object.vm_object, 1550 pindex, count, behav); 1551 if (behav == MADV_WILLNEED) { 1552 vm_map_pmap_enter(map, 1553 useStart, 1554 current->object.vm_object, 1555 pindex, 1556 (count << PAGE_SHIFT), 1557 MAP_PREFAULT_MADVISE 1558 ); 1559 } 1560 } 1561 vm_map_unlock_read(map); 1562 } 1563 return (0); 1564 } 1565 1566 1567 /* 1568 * vm_map_inherit: 1569 * 1570 * Sets the inheritance of the specified address 1571 * range in the target map. Inheritance 1572 * affects how the map will be shared with 1573 * child maps at the time of vm_map_fork. 1574 */ 1575 int 1576 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1577 vm_inherit_t new_inheritance) 1578 { 1579 vm_map_entry_t entry; 1580 vm_map_entry_t temp_entry; 1581 1582 switch (new_inheritance) { 1583 case VM_INHERIT_NONE: 1584 case VM_INHERIT_COPY: 1585 case VM_INHERIT_SHARE: 1586 break; 1587 default: 1588 return (KERN_INVALID_ARGUMENT); 1589 } 1590 vm_map_lock(map); 1591 VM_MAP_RANGE_CHECK(map, start, end); 1592 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1593 entry = temp_entry; 1594 vm_map_clip_start(map, entry, start); 1595 } else 1596 entry = temp_entry->next; 1597 while ((entry != &map->header) && (entry->start < end)) { 1598 vm_map_clip_end(map, entry, end); 1599 entry->inheritance = new_inheritance; 1600 vm_map_simplify_entry(map, entry); 1601 entry = entry->next; 1602 } 1603 vm_map_unlock(map); 1604 return (KERN_SUCCESS); 1605 } 1606 1607 /* 1608 * vm_map_unwire: 1609 * 1610 * Implements both kernel and user unwiring. 1611 */ 1612 int 1613 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1614 int flags) 1615 { 1616 vm_map_entry_t entry, first_entry, tmp_entry; 1617 vm_offset_t saved_start; 1618 unsigned int last_timestamp; 1619 int rv; 1620 boolean_t need_wakeup, result, user_unwire; 1621 1622 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 1623 vm_map_lock(map); 1624 VM_MAP_RANGE_CHECK(map, start, end); 1625 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1626 if (flags & VM_MAP_WIRE_HOLESOK) 1627 first_entry = first_entry->next; 1628 else { 1629 vm_map_unlock(map); 1630 return (KERN_INVALID_ADDRESS); 1631 } 1632 } 1633 last_timestamp = map->timestamp; 1634 entry = first_entry; 1635 while (entry != &map->header && entry->start < end) { 1636 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1637 /* 1638 * We have not yet clipped the entry. 1639 */ 1640 saved_start = (start >= entry->start) ? start : 1641 entry->start; 1642 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1643 if (vm_map_unlock_and_wait(map, user_unwire)) { 1644 /* 1645 * Allow interruption of user unwiring? 1646 */ 1647 } 1648 vm_map_lock(map); 1649 if (last_timestamp+1 != map->timestamp) { 1650 /* 1651 * Look again for the entry because the map was 1652 * modified while it was unlocked. 1653 * Specifically, the entry may have been 1654 * clipped, merged, or deleted. 
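				 *
				 * (map->timestamp is bumped on every
				 * exclusive lock acquisition, so
				 * "last_timestamp + 1 != map->timestamp"
				 * means another thread got the map lock
				 * while we slept and may have changed the
				 * entry list.)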
1655 */ 1656 if (!vm_map_lookup_entry(map, saved_start, 1657 &tmp_entry)) { 1658 if (flags & VM_MAP_WIRE_HOLESOK) 1659 tmp_entry = tmp_entry->next; 1660 else { 1661 if (saved_start == start) { 1662 /* 1663 * First_entry has been deleted. 1664 */ 1665 vm_map_unlock(map); 1666 return (KERN_INVALID_ADDRESS); 1667 } 1668 end = saved_start; 1669 rv = KERN_INVALID_ADDRESS; 1670 goto done; 1671 } 1672 } 1673 if (entry == first_entry) 1674 first_entry = tmp_entry; 1675 else 1676 first_entry = NULL; 1677 entry = tmp_entry; 1678 } 1679 last_timestamp = map->timestamp; 1680 continue; 1681 } 1682 vm_map_clip_start(map, entry, start); 1683 vm_map_clip_end(map, entry, end); 1684 /* 1685 * Mark the entry in case the map lock is released. (See 1686 * above.) 1687 */ 1688 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1689 /* 1690 * Check the map for holes in the specified region. 1691 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 1692 */ 1693 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 1694 (entry->end < end && (entry->next == &map->header || 1695 entry->next->start > entry->end))) { 1696 end = entry->end; 1697 rv = KERN_INVALID_ADDRESS; 1698 goto done; 1699 } 1700 /* 1701 * Require that the entry is wired. 1702 */ 1703 if (entry->wired_count == 0 || (user_unwire && 1704 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) { 1705 end = entry->end; 1706 rv = KERN_INVALID_ARGUMENT; 1707 goto done; 1708 } 1709 entry = entry->next; 1710 } 1711 rv = KERN_SUCCESS; 1712 done: 1713 need_wakeup = FALSE; 1714 if (first_entry == NULL) { 1715 result = vm_map_lookup_entry(map, start, &first_entry); 1716 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 1717 first_entry = first_entry->next; 1718 else 1719 KASSERT(result, ("vm_map_unwire: lookup failed")); 1720 } 1721 entry = first_entry; 1722 while (entry != &map->header && entry->start < end) { 1723 if (rv == KERN_SUCCESS) { 1724 if (user_unwire) 1725 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1726 entry->wired_count--; 1727 if (entry->wired_count == 0) { 1728 /* 1729 * Retain the map lock. 1730 */ 1731 vm_fault_unwire(map, entry->start, entry->end); 1732 } 1733 } 1734 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1735 ("vm_map_unwire: in-transition flag missing")); 1736 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1737 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1738 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1739 need_wakeup = TRUE; 1740 } 1741 vm_map_simplify_entry(map, entry); 1742 entry = entry->next; 1743 } 1744 vm_map_unlock(map); 1745 if (need_wakeup) 1746 vm_map_wakeup(map); 1747 return (rv); 1748 } 1749 1750 /* 1751 * vm_map_wire: 1752 * 1753 * Implements both kernel and user wiring. 1754 */ 1755 int 1756 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1757 int flags) 1758 { 1759 vm_map_entry_t entry, first_entry, tmp_entry; 1760 vm_offset_t saved_end, saved_start; 1761 unsigned int last_timestamp; 1762 int rv; 1763 boolean_t need_wakeup, result, user_wire; 1764 1765 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 1766 vm_map_lock(map); 1767 VM_MAP_RANGE_CHECK(map, start, end); 1768 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1769 if (flags & VM_MAP_WIRE_HOLESOK) 1770 first_entry = first_entry->next; 1771 else { 1772 vm_map_unlock(map); 1773 return (KERN_INVALID_ADDRESS); 1774 } 1775 } 1776 last_timestamp = map->timestamp; 1777 entry = first_entry; 1778 while (entry != &map->header && entry->start < end) { 1779 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1780 /* 1781 * We have not yet clipped the entry. 
1782 */ 1783 saved_start = (start >= entry->start) ? start : 1784 entry->start; 1785 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1786 if (vm_map_unlock_and_wait(map, user_wire)) { 1787 /* 1788 * Allow interruption of user wiring? 1789 */ 1790 } 1791 vm_map_lock(map); 1792 if (last_timestamp + 1 != map->timestamp) { 1793 /* 1794 * Look again for the entry because the map was 1795 * modified while it was unlocked. 1796 * Specifically, the entry may have been 1797 * clipped, merged, or deleted. 1798 */ 1799 if (!vm_map_lookup_entry(map, saved_start, 1800 &tmp_entry)) { 1801 if (flags & VM_MAP_WIRE_HOLESOK) 1802 tmp_entry = tmp_entry->next; 1803 else { 1804 if (saved_start == start) { 1805 /* 1806 * first_entry has been deleted. 1807 */ 1808 vm_map_unlock(map); 1809 return (KERN_INVALID_ADDRESS); 1810 } 1811 end = saved_start; 1812 rv = KERN_INVALID_ADDRESS; 1813 goto done; 1814 } 1815 } 1816 if (entry == first_entry) 1817 first_entry = tmp_entry; 1818 else 1819 first_entry = NULL; 1820 entry = tmp_entry; 1821 } 1822 last_timestamp = map->timestamp; 1823 continue; 1824 } 1825 vm_map_clip_start(map, entry, start); 1826 vm_map_clip_end(map, entry, end); 1827 /* 1828 * Mark the entry in case the map lock is released. (See 1829 * above.) 1830 */ 1831 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1832 /* 1833 * 1834 */ 1835 if (entry->wired_count == 0) { 1836 entry->wired_count++; 1837 saved_start = entry->start; 1838 saved_end = entry->end; 1839 /* 1840 * Release the map lock, relying on the in-transition 1841 * mark. 1842 */ 1843 vm_map_unlock(map); 1844 rv = vm_fault_wire(map, saved_start, saved_end, 1845 user_wire); 1846 vm_map_lock(map); 1847 if (last_timestamp + 1 != map->timestamp) { 1848 /* 1849 * Look again for the entry because the map was 1850 * modified while it was unlocked. The entry 1851 * may have been clipped, but NOT merged or 1852 * deleted. 1853 */ 1854 result = vm_map_lookup_entry(map, saved_start, 1855 &tmp_entry); 1856 KASSERT(result, ("vm_map_wire: lookup failed")); 1857 if (entry == first_entry) 1858 first_entry = tmp_entry; 1859 else 1860 first_entry = NULL; 1861 entry = tmp_entry; 1862 while (entry->end < saved_end) { 1863 if (rv != KERN_SUCCESS) { 1864 KASSERT(entry->wired_count == 1, 1865 ("vm_map_wire: bad count")); 1866 entry->wired_count = -1; 1867 } 1868 entry = entry->next; 1869 } 1870 } 1871 last_timestamp = map->timestamp; 1872 if (rv != KERN_SUCCESS) { 1873 KASSERT(entry->wired_count == 1, 1874 ("vm_map_wire: bad count")); 1875 /* 1876 * Assign an out-of-range value to represent 1877 * the failure to wire this entry. 1878 */ 1879 entry->wired_count = -1; 1880 end = entry->end; 1881 goto done; 1882 } 1883 } else if (!user_wire || 1884 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 1885 entry->wired_count++; 1886 } 1887 /* 1888 * Check the map for holes in the specified region. 1889 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
1890 */ 1891 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 1892 (entry->end < end && (entry->next == &map->header || 1893 entry->next->start > entry->end))) { 1894 end = entry->end; 1895 rv = KERN_INVALID_ADDRESS; 1896 goto done; 1897 } 1898 entry = entry->next; 1899 } 1900 rv = KERN_SUCCESS; 1901 done: 1902 need_wakeup = FALSE; 1903 if (first_entry == NULL) { 1904 result = vm_map_lookup_entry(map, start, &first_entry); 1905 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 1906 first_entry = first_entry->next; 1907 else 1908 KASSERT(result, ("vm_map_wire: lookup failed")); 1909 } 1910 entry = first_entry; 1911 while (entry != &map->header && entry->start < end) { 1912 if (rv == KERN_SUCCESS) { 1913 if (user_wire) 1914 entry->eflags |= MAP_ENTRY_USER_WIRED; 1915 } else if (entry->wired_count == -1) { 1916 /* 1917 * Wiring failed on this entry. Thus, unwiring is 1918 * unnecessary. 1919 */ 1920 entry->wired_count = 0; 1921 } else { 1922 if (!user_wire || 1923 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 1924 entry->wired_count--; 1925 if (entry->wired_count == 0) { 1926 /* 1927 * Retain the map lock. 1928 */ 1929 vm_fault_unwire(map, entry->start, entry->end); 1930 } 1931 } 1932 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1933 ("vm_map_wire: in-transition flag missing")); 1934 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1935 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1936 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1937 need_wakeup = TRUE; 1938 } 1939 vm_map_simplify_entry(map, entry); 1940 entry = entry->next; 1941 } 1942 vm_map_unlock(map); 1943 if (need_wakeup) 1944 vm_map_wakeup(map); 1945 return (rv); 1946 } 1947 1948 /* 1949 * vm_map_sync 1950 * 1951 * Push any dirty cached pages in the address range to their pager. 1952 * If syncio is TRUE, dirty pages are written synchronously. 1953 * If invalidate is TRUE, any cached pages are freed as well. 1954 * 1955 * If the size of the region from start to end is zero, we are 1956 * supposed to flush all modified pages within the region containing 1957 * start. Unfortunately, a region can be split or coalesced with 1958 * neighboring regions, making it difficult to determine what the 1959 * original region was. Therefore, we approximate this requirement by 1960 * flushing the current region containing start. 1961 * 1962 * Returns an error if any part of the specified range is not mapped. 1963 */ 1964 int 1965 vm_map_sync( 1966 vm_map_t map, 1967 vm_offset_t start, 1968 vm_offset_t end, 1969 boolean_t syncio, 1970 boolean_t invalidate) 1971 { 1972 vm_map_entry_t current; 1973 vm_map_entry_t entry; 1974 vm_size_t size; 1975 vm_object_t object; 1976 vm_ooffset_t offset; 1977 1978 vm_map_lock_read(map); 1979 VM_MAP_RANGE_CHECK(map, start, end); 1980 if (!vm_map_lookup_entry(map, start, &entry)) { 1981 vm_map_unlock_read(map); 1982 return (KERN_INVALID_ADDRESS); 1983 } else if (start == end) { 1984 start = entry->start; 1985 end = entry->end; 1986 } 1987 /* 1988 * Make a first pass to check for user-wired memory and holes. 
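	 * The actual cleaning happens in the second pass below, so any
	 * error is returned before pages have been flushed or invalidated.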
1989 */ 1990 for (current = entry; current->start < end; current = current->next) { 1991 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 1992 vm_map_unlock_read(map); 1993 return (KERN_INVALID_ARGUMENT); 1994 } 1995 if (end > current->end && 1996 (current->next == &map->header || 1997 current->end != current->next->start)) { 1998 vm_map_unlock_read(map); 1999 return (KERN_INVALID_ADDRESS); 2000 } 2001 } 2002 2003 if (invalidate) { 2004 mtx_lock(&Giant); 2005 vm_page_lock_queues(); 2006 pmap_remove(map->pmap, start, end); 2007 vm_page_unlock_queues(); 2008 mtx_unlock(&Giant); 2009 } 2010 /* 2011 * Make a second pass, cleaning/uncaching pages from the indicated 2012 * objects as we go. 2013 */ 2014 for (current = entry; current->start < end; current = current->next) { 2015 offset = current->offset + (start - current->start); 2016 size = (end <= current->end ? end : current->end) - start; 2017 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2018 vm_map_t smap; 2019 vm_map_entry_t tentry; 2020 vm_size_t tsize; 2021 2022 smap = current->object.sub_map; 2023 vm_map_lock_read(smap); 2024 (void) vm_map_lookup_entry(smap, offset, &tentry); 2025 tsize = tentry->end - offset; 2026 if (tsize < size) 2027 size = tsize; 2028 object = tentry->object.vm_object; 2029 offset = tentry->offset + (offset - tentry->start); 2030 vm_map_unlock_read(smap); 2031 } else { 2032 object = current->object.vm_object; 2033 } 2034 vm_object_sync(object, offset, size, syncio, invalidate); 2035 start += size; 2036 } 2037 2038 vm_map_unlock_read(map); 2039 return (KERN_SUCCESS); 2040 } 2041 2042 /* 2043 * vm_map_entry_unwire: [ internal use only ] 2044 * 2045 * Make the region specified by this entry pageable. 2046 * 2047 * The map in question should be locked. 2048 * [This is the reason for this routine's existence.] 2049 */ 2050 static void 2051 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2052 { 2053 vm_fault_unwire(map, entry->start, entry->end); 2054 entry->wired_count = 0; 2055 } 2056 2057 /* 2058 * vm_map_entry_delete: [ internal use only ] 2059 * 2060 * Deallocate the given entry from the target map. 2061 */ 2062 static void 2063 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2064 { 2065 vm_object_t object; 2066 vm_pindex_t offidxstart, offidxend, count; 2067 2068 vm_map_entry_unlink(map, entry); 2069 map->size -= entry->end - entry->start; 2070 2071 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2072 (object = entry->object.vm_object) != NULL) { 2073 count = OFF_TO_IDX(entry->end - entry->start); 2074 offidxstart = OFF_TO_IDX(entry->offset); 2075 offidxend = offidxstart + count; 2076 VM_OBJECT_LOCK(object); 2077 if (object->ref_count != 1 && 2078 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2079 object == kernel_object || object == kmem_object) && 2080 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2081 vm_object_collapse(object); 2082 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2083 if (object->type == OBJT_SWAP) 2084 swap_pager_freespace(object, offidxstart, count); 2085 if (offidxend >= object->size && 2086 offidxstart < object->size) 2087 object->size = offidxstart; 2088 } 2089 VM_OBJECT_UNLOCK(object); 2090 vm_object_deallocate(object); 2091 } 2092 2093 vm_map_entry_dispose(map, entry); 2094 } 2095 2096 /* 2097 * vm_map_delete: [ internal use only ] 2098 * 2099 * Deallocates the given address range from the target 2100 * map. 
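 *
 * The caller must hold the map lock; vm_map_remove(), below, is the
 * exported wrapper that takes the lock and applies VM_MAP_RANGE_CHECK
 * before calling here.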
2101 */ 2102 int 2103 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2104 { 2105 vm_map_entry_t entry; 2106 vm_map_entry_t first_entry; 2107 2108 /* 2109 * Find the start of the region, and clip it 2110 */ 2111 if (!vm_map_lookup_entry(map, start, &first_entry)) 2112 entry = first_entry->next; 2113 else { 2114 entry = first_entry; 2115 vm_map_clip_start(map, entry, start); 2116 } 2117 2118 /* 2119 * Save the free space hint 2120 */ 2121 if (entry == &map->header) { 2122 map->first_free = &map->header; 2123 } else if (map->first_free->start >= start) { 2124 map->first_free = entry->prev; 2125 } 2126 2127 /* 2128 * Step through all entries in this region 2129 */ 2130 while ((entry != &map->header) && (entry->start < end)) { 2131 vm_map_entry_t next; 2132 2133 /* 2134 * Wait for wiring or unwiring of an entry to complete. 2135 */ 2136 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) { 2137 unsigned int last_timestamp; 2138 vm_offset_t saved_start; 2139 vm_map_entry_t tmp_entry; 2140 2141 saved_start = entry->start; 2142 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2143 last_timestamp = map->timestamp; 2144 (void) vm_map_unlock_and_wait(map, FALSE); 2145 vm_map_lock(map); 2146 if (last_timestamp + 1 != map->timestamp) { 2147 /* 2148 * Look again for the entry because the map was 2149 * modified while it was unlocked. 2150 * Specifically, the entry may have been 2151 * clipped, merged, or deleted. 2152 */ 2153 if (!vm_map_lookup_entry(map, saved_start, 2154 &tmp_entry)) 2155 entry = tmp_entry->next; 2156 else { 2157 entry = tmp_entry; 2158 vm_map_clip_start(map, entry, 2159 saved_start); 2160 } 2161 } 2162 continue; 2163 } 2164 vm_map_clip_end(map, entry, end); 2165 2166 next = entry->next; 2167 2168 /* 2169 * Unwire before removing addresses from the pmap; otherwise, 2170 * unwiring will put the entries back in the pmap. 2171 */ 2172 if (entry->wired_count != 0) { 2173 vm_map_entry_unwire(map, entry); 2174 } 2175 2176 if (map != kmem_map) 2177 mtx_lock(&Giant); 2178 vm_page_lock_queues(); 2179 pmap_remove(map->pmap, entry->start, entry->end); 2180 vm_page_unlock_queues(); 2181 if (map != kmem_map) 2182 mtx_unlock(&Giant); 2183 2184 /* 2185 * Delete the entry (which may delete the object) only after 2186 * removing all pmap entries pointing to its pages. 2187 * (Otherwise, its page frames may be reallocated, and any 2188 * modify bits will be set in the wrong object!) 2189 */ 2190 vm_map_entry_delete(map, entry); 2191 entry = next; 2192 } 2193 return (KERN_SUCCESS); 2194 } 2195 2196 /* 2197 * vm_map_remove: 2198 * 2199 * Remove the given address range from the target map. 2200 * This is the exported form of vm_map_delete. 2201 */ 2202 int 2203 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2204 { 2205 int result, s = 0; 2206 2207 if (map == kmem_map) 2208 s = splvm(); 2209 2210 vm_map_lock(map); 2211 VM_MAP_RANGE_CHECK(map, start, end); 2212 result = vm_map_delete(map, start, end); 2213 vm_map_unlock(map); 2214 2215 if (map == kmem_map) 2216 splx(s); 2217 2218 return (result); 2219 } 2220 2221 /* 2222 * vm_map_check_protection: 2223 * 2224 * Assert that the target map allows the specified privilege on the 2225 * entire address region given. The entire region must be allocated. 2226 * 2227 * WARNING! This code does not and should not check whether the 2228 * contents of the region is accessible. For example a smaller file 2229 * might be mapped into a larger address space. 2230 * 2231 * NOTE! This code is also called by munmap(). 
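 *
 * A minimal usage sketch (hypothetical caller; "map", "start", "end",
 * and "ok" stand in for any read-locked map, page-aligned range, and
 * result variable):
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);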
2232 * 2233 * The map must be locked. A read lock is sufficient. 2234 */ 2235 boolean_t 2236 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2237 vm_prot_t protection) 2238 { 2239 vm_map_entry_t entry; 2240 vm_map_entry_t tmp_entry; 2241 2242 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2243 return (FALSE); 2244 entry = tmp_entry; 2245 2246 while (start < end) { 2247 if (entry == &map->header) 2248 return (FALSE); 2249 /* 2250 * No holes allowed! 2251 */ 2252 if (start < entry->start) 2253 return (FALSE); 2254 /* 2255 * Check protection associated with entry. 2256 */ 2257 if ((entry->protection & protection) != protection) 2258 return (FALSE); 2259 /* go to next entry */ 2260 start = entry->end; 2261 entry = entry->next; 2262 } 2263 return (TRUE); 2264 } 2265 2266 /* 2267 * vm_map_copy_entry: 2268 * 2269 * Copies the contents of the source entry to the destination 2270 * entry. The entries *must* be aligned properly. 2271 */ 2272 static void 2273 vm_map_copy_entry( 2274 vm_map_t src_map, 2275 vm_map_t dst_map, 2276 vm_map_entry_t src_entry, 2277 vm_map_entry_t dst_entry) 2278 { 2279 vm_object_t src_object; 2280 2281 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2282 return; 2283 2284 if (src_entry->wired_count == 0) { 2285 2286 /* 2287 * If the source entry is marked needs_copy, it is already 2288 * write-protected. 2289 */ 2290 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2291 vm_page_lock_queues(); 2292 pmap_protect(src_map->pmap, 2293 src_entry->start, 2294 src_entry->end, 2295 src_entry->protection & ~VM_PROT_WRITE); 2296 vm_page_unlock_queues(); 2297 } 2298 2299 /* 2300 * Make a copy of the object. 2301 */ 2302 if ((src_object = src_entry->object.vm_object) != NULL) { 2303 VM_OBJECT_LOCK(src_object); 2304 if ((src_object->handle == NULL) && 2305 (src_object->type == OBJT_DEFAULT || 2306 src_object->type == OBJT_SWAP)) { 2307 vm_object_collapse(src_object); 2308 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2309 vm_object_split(src_entry); 2310 src_object = src_entry->object.vm_object; 2311 } 2312 } 2313 vm_object_reference_locked(src_object); 2314 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2315 VM_OBJECT_UNLOCK(src_object); 2316 dst_entry->object.vm_object = src_object; 2317 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2318 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2319 dst_entry->offset = src_entry->offset; 2320 } else { 2321 dst_entry->object.vm_object = NULL; 2322 dst_entry->offset = 0; 2323 } 2324 2325 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2326 dst_entry->end - dst_entry->start, src_entry->start); 2327 } else { 2328 /* 2329 * Of course, wired down pages can't be set copy-on-write. 2330 * Cause wired pages to be copied into the new map by 2331 * simulating faults (the new pages are pageable) 2332 */ 2333 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2334 } 2335 } 2336 2337 /* 2338 * vmspace_fork: 2339 * Create a new process vmspace structure and vm_map 2340 * based on those of an existing process. The new map 2341 * is based on the old map, according to the inheritance 2342 * values on the regions in that map. 2343 * 2344 * The source map must not be locked. 
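 *
 * Entries are handled according to their inheritance value:
 * VM_INHERIT_NONE entries are omitted from the child,
 * VM_INHERIT_SHARE entries reference the same backing object in
 * both maps, and VM_INHERIT_COPY entries are copied copy-on-write
 * (wired pages are copied immediately).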
2345 */ 2346 struct vmspace * 2347 vmspace_fork(struct vmspace *vm1) 2348 { 2349 struct vmspace *vm2; 2350 vm_map_t old_map = &vm1->vm_map; 2351 vm_map_t new_map; 2352 vm_map_entry_t old_entry; 2353 vm_map_entry_t new_entry; 2354 vm_object_t object; 2355 2356 GIANT_REQUIRED; 2357 2358 vm_map_lock(old_map); 2359 old_map->infork = 1; 2360 2361 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2362 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 2363 (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy); 2364 new_map = &vm2->vm_map; /* XXX */ 2365 new_map->timestamp = 1; 2366 2367 /* Do not inherit the MAP_WIREFUTURE property. */ 2368 if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) 2369 new_map->flags &= ~MAP_WIREFUTURE; 2370 2371 old_entry = old_map->header.next; 2372 2373 while (old_entry != &old_map->header) { 2374 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2375 panic("vm_map_fork: encountered a submap"); 2376 2377 switch (old_entry->inheritance) { 2378 case VM_INHERIT_NONE: 2379 break; 2380 2381 case VM_INHERIT_SHARE: 2382 /* 2383 * Clone the entry, creating the shared object if necessary. 2384 */ 2385 object = old_entry->object.vm_object; 2386 if (object == NULL) { 2387 object = vm_object_allocate(OBJT_DEFAULT, 2388 atop(old_entry->end - old_entry->start)); 2389 old_entry->object.vm_object = object; 2390 old_entry->offset = (vm_offset_t) 0; 2391 } 2392 2393 /* 2394 * Add the reference before calling vm_object_shadow 2395 * to insure that a shadow object is created. 2396 */ 2397 vm_object_reference(object); 2398 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2399 vm_object_shadow(&old_entry->object.vm_object, 2400 &old_entry->offset, 2401 atop(old_entry->end - old_entry->start)); 2402 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2403 /* Transfer the second reference too. */ 2404 vm_object_reference( 2405 old_entry->object.vm_object); 2406 vm_object_deallocate(object); 2407 object = old_entry->object.vm_object; 2408 } 2409 VM_OBJECT_LOCK(object); 2410 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2411 VM_OBJECT_UNLOCK(object); 2412 2413 /* 2414 * Clone the entry, referencing the shared object. 2415 */ 2416 new_entry = vm_map_entry_create(new_map); 2417 *new_entry = *old_entry; 2418 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2419 new_entry->wired_count = 0; 2420 2421 /* 2422 * Insert the entry into the new map -- we know we're 2423 * inserting at the end of the new map. 2424 */ 2425 vm_map_entry_link(new_map, new_map->header.prev, 2426 new_entry); 2427 2428 /* 2429 * Update the physical map 2430 */ 2431 pmap_copy(new_map->pmap, old_map->pmap, 2432 new_entry->start, 2433 (old_entry->end - old_entry->start), 2434 old_entry->start); 2435 break; 2436 2437 case VM_INHERIT_COPY: 2438 /* 2439 * Clone the entry and link into the map. 
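			 * The new entry starts without a backing object;
			 * vm_map_copy_entry() below either shares the source
			 * object copy-on-write or, for wired entries, copies
			 * the pages immediately.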
2440 */ 2441 new_entry = vm_map_entry_create(new_map); 2442 *new_entry = *old_entry; 2443 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2444 new_entry->wired_count = 0; 2445 new_entry->object.vm_object = NULL; 2446 vm_map_entry_link(new_map, new_map->header.prev, 2447 new_entry); 2448 vm_map_copy_entry(old_map, new_map, old_entry, 2449 new_entry); 2450 break; 2451 } 2452 old_entry = old_entry->next; 2453 } 2454 2455 new_map->size = old_map->size; 2456 old_map->infork = 0; 2457 vm_map_unlock(old_map); 2458 2459 return (vm2); 2460 } 2461 2462 int 2463 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2464 vm_prot_t prot, vm_prot_t max, int cow) 2465 { 2466 vm_map_entry_t new_entry, prev_entry; 2467 vm_offset_t bot, top; 2468 vm_size_t init_ssize; 2469 int orient, rv; 2470 2471 /* 2472 * The stack orientation is piggybacked with the cow argument. 2473 * Extract it into orient and mask the cow argument so that we 2474 * don't pass it around further. 2475 * NOTE: We explicitly allow bi-directional stacks. 2476 */ 2477 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); 2478 cow &= ~orient; 2479 KASSERT(orient != 0, ("No stack grow direction")); 2480 2481 if (addrbos < vm_map_min(map) || addrbos > map->max_offset) 2482 return (KERN_NO_SPACE); 2483 2484 init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz; 2485 2486 vm_map_lock(map); 2487 2488 /* If addr is already mapped, no go */ 2489 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2490 vm_map_unlock(map); 2491 return (KERN_NO_SPACE); 2492 } 2493 2494 /* If we would blow our VMEM resource limit, no go */ 2495 if (map->size + init_ssize > 2496 curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 2497 vm_map_unlock(map); 2498 return (KERN_NO_SPACE); 2499 } 2500 2501 /* 2502 * If we can't accomodate max_ssize in the current mapping, no go. 2503 * However, we need to be aware that subsequent user mappings might 2504 * map into the space we have reserved for stack, and currently this 2505 * space is not protected. 2506 * 2507 * Hopefully we will at least detect this condition when we try to 2508 * grow the stack. 2509 */ 2510 if ((prev_entry->next != &map->header) && 2511 (prev_entry->next->start < addrbos + max_ssize)) { 2512 vm_map_unlock(map); 2513 return (KERN_NO_SPACE); 2514 } 2515 2516 /* 2517 * We initially map a stack of only init_ssize. We will grow as 2518 * needed later. Depending on the orientation of the stack (i.e. 2519 * the grow direction) we either map at the top of the range, the 2520 * bottom of the range or in the middle. 2521 * 2522 * Note: we would normally expect prot and max to be VM_PROT_ALL, 2523 * and cow to be 0. Possibly we should eliminate these as input 2524 * parameters, and just pass these values here in the insert call. 2525 */ 2526 if (orient == MAP_STACK_GROWS_DOWN) 2527 bot = addrbos + max_ssize - init_ssize; 2528 else if (orient == MAP_STACK_GROWS_UP) 2529 bot = addrbos; 2530 else 2531 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 2532 top = bot + init_ssize; 2533 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 2534 2535 /* Now set the avail_ssize amount. 
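	 * avail_ssize records how much further the stack may grow;
	 * the GROWS_DOWN/GROWS_UP flags tell vm_map_growstack() in
	 * which direction to extend this entry.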
*/ 2536 if (rv == KERN_SUCCESS) { 2537 if (prev_entry != &map->header) 2538 vm_map_clip_end(map, prev_entry, bot); 2539 new_entry = prev_entry->next; 2540 if (new_entry->end != top || new_entry->start != bot) 2541 panic("Bad entry start/end for new stack entry"); 2542 2543 new_entry->avail_ssize = max_ssize - init_ssize; 2544 if (orient & MAP_STACK_GROWS_DOWN) 2545 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2546 if (orient & MAP_STACK_GROWS_UP) 2547 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 2548 } 2549 2550 vm_map_unlock(map); 2551 return (rv); 2552 } 2553 2554 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2555 * desired address is already mapped, or if we successfully grow 2556 * the stack. Also returns KERN_SUCCESS if addr is outside the 2557 * stack range (this is strange, but preserves compatibility with 2558 * the grow function in vm_machdep.c). 2559 */ 2560 int 2561 vm_map_growstack(struct proc *p, vm_offset_t addr) 2562 { 2563 vm_map_entry_t next_entry, prev_entry; 2564 vm_map_entry_t new_entry, stack_entry; 2565 struct vmspace *vm = p->p_vmspace; 2566 vm_map_t map = &vm->vm_map; 2567 vm_offset_t end; 2568 size_t grow_amount, max_grow; 2569 int is_procstack, rv; 2570 2571 GIANT_REQUIRED; 2572 2573 Retry: 2574 vm_map_lock_read(map); 2575 2576 /* If addr is already in the entry range, no need to grow.*/ 2577 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2578 vm_map_unlock_read(map); 2579 return (KERN_SUCCESS); 2580 } 2581 2582 next_entry = prev_entry->next; 2583 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 2584 /* 2585 * This entry does not grow upwards. Since the address lies 2586 * beyond this entry, the next entry (if one exists) has to 2587 * be a downward growable entry. The entry list header is 2588 * never a growable entry, so it suffices to check the flags. 2589 */ 2590 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 2591 vm_map_unlock_read(map); 2592 return (KERN_SUCCESS); 2593 } 2594 stack_entry = next_entry; 2595 } else { 2596 /* 2597 * This entry grows upward. If the next entry does not at 2598 * least grow downwards, this is the entry we need to grow. 2599 * otherwise we have two possible choices and we have to 2600 * select one. 2601 */ 2602 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 2603 /* 2604 * We have two choices; grow the entry closest to 2605 * the address to minimize the amount of growth. 2606 */ 2607 if (addr - prev_entry->end <= next_entry->start - addr) 2608 stack_entry = prev_entry; 2609 else 2610 stack_entry = next_entry; 2611 } else 2612 stack_entry = prev_entry; 2613 } 2614 2615 if (stack_entry == next_entry) { 2616 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 2617 KASSERT(addr < stack_entry->start, ("foo")); 2618 end = (prev_entry != &map->header) ? prev_entry->end : 2619 stack_entry->start - stack_entry->avail_ssize; 2620 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 2621 max_grow = stack_entry->start - end; 2622 } else { 2623 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 2624 KASSERT(addr >= stack_entry->end, ("foo")); 2625 end = (next_entry != &map->header) ? 
next_entry->start : 2626 stack_entry->end + stack_entry->avail_ssize; 2627 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 2628 max_grow = end - stack_entry->end; 2629 } 2630 2631 if (grow_amount > stack_entry->avail_ssize) { 2632 vm_map_unlock_read(map); 2633 return (KERN_NO_SPACE); 2634 } 2635 2636 /* 2637 * If there is no longer enough space between the entries nogo, and 2638 * adjust the available space. Note: this should only happen if the 2639 * user has mapped into the stack area after the stack was created, 2640 * and is probably an error. 2641 * 2642 * This also effectively destroys any guard page the user might have 2643 * intended by limiting the stack size. 2644 */ 2645 if (grow_amount > max_grow) { 2646 if (vm_map_lock_upgrade(map)) 2647 goto Retry; 2648 2649 stack_entry->avail_ssize = max_grow; 2650 2651 vm_map_unlock(map); 2652 return (KERN_NO_SPACE); 2653 } 2654 2655 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 2656 2657 /* 2658 * If this is the main process stack, see if we're over the stack 2659 * limit. 2660 */ 2661 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2662 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2663 vm_map_unlock_read(map); 2664 return (KERN_NO_SPACE); 2665 } 2666 2667 /* Round up the grow amount modulo SGROWSIZ */ 2668 grow_amount = roundup (grow_amount, sgrowsiz); 2669 if (grow_amount > stack_entry->avail_ssize) 2670 grow_amount = stack_entry->avail_ssize; 2671 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 2672 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 2673 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 2674 ctob(vm->vm_ssize); 2675 } 2676 2677 /* If we would blow our VMEM resource limit, no go */ 2678 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 2679 vm_map_unlock_read(map); 2680 return (KERN_NO_SPACE); 2681 } 2682 2683 if (vm_map_lock_upgrade(map)) 2684 goto Retry; 2685 2686 if (stack_entry == next_entry) { 2687 /* 2688 * Growing downward. 2689 */ 2690 /* Get the preliminary new entry start value */ 2691 addr = stack_entry->start - grow_amount; 2692 2693 /* 2694 * If this puts us into the previous entry, cut back our 2695 * growth to the available space. Also, see the note above. 2696 */ 2697 if (addr < end) { 2698 stack_entry->avail_ssize = max_grow; 2699 addr = end; 2700 } 2701 2702 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2703 p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); 2704 2705 /* Adjust the available stack space by the amount we grew. */ 2706 if (rv == KERN_SUCCESS) { 2707 if (prev_entry != &map->header) 2708 vm_map_clip_end(map, prev_entry, addr); 2709 new_entry = prev_entry->next; 2710 KASSERT(new_entry == stack_entry->prev, ("foo")); 2711 KASSERT(new_entry->end == stack_entry->start, ("foo")); 2712 KASSERT(new_entry->start == addr, ("foo")); 2713 grow_amount = new_entry->end - new_entry->start; 2714 new_entry->avail_ssize = stack_entry->avail_ssize - 2715 grow_amount; 2716 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 2717 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2718 } 2719 } else { 2720 /* 2721 * Growing upward. 2722 */ 2723 addr = stack_entry->end + grow_amount; 2724 2725 /* 2726 * If this puts us into the next entry, cut back our growth 2727 * to the available space. Also, see the note above. 2728 */ 2729 if (addr > end) { 2730 stack_entry->avail_ssize = end - stack_entry->end; 2731 addr = end; 2732 } 2733 2734 grow_amount = addr - stack_entry->end; 2735 2736 /* Grow the underlying object if applicable. 
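		 * An upward grow succeeds if there is no backing object yet
		 * or if the existing object can be extended in place by
		 * vm_object_coalesce(); otherwise the request fails with
		 * KERN_FAILURE.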
*/ 2737 if (stack_entry->object.vm_object == NULL || 2738 vm_object_coalesce(stack_entry->object.vm_object, 2739 OFF_TO_IDX(stack_entry->offset), 2740 (vm_size_t)(stack_entry->end - stack_entry->start), 2741 (vm_size_t)grow_amount)) { 2742 map->size += (addr - stack_entry->end); 2743 /* Update the current entry. */ 2744 stack_entry->end = addr; 2745 stack_entry->avail_ssize -= grow_amount; 2746 rv = KERN_SUCCESS; 2747 2748 if (next_entry != &map->header) 2749 vm_map_clip_start(map, next_entry, addr); 2750 } else 2751 rv = KERN_FAILURE; 2752 } 2753 2754 if (rv == KERN_SUCCESS && is_procstack) 2755 vm->vm_ssize += btoc(grow_amount); 2756 2757 vm_map_unlock(map); 2758 2759 /* 2760 * Heed the MAP_WIREFUTURE flag if it was set for this process. 2761 */ 2762 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 2763 vm_map_wire(map, 2764 (stack_entry == next_entry) ? addr : addr - grow_amount, 2765 (stack_entry == next_entry) ? stack_entry->start : addr, 2766 (p->p_flag & P_SYSTEM) 2767 ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 2768 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 2769 } 2770 2771 return (rv); 2772 } 2773 2774 /* 2775 * Unshare the specified VM space for exec. If other processes are 2776 * mapped to it, then create a new one. The new vmspace is null. 2777 */ 2778 void 2779 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 2780 { 2781 struct vmspace *oldvmspace = p->p_vmspace; 2782 struct vmspace *newvmspace; 2783 2784 GIANT_REQUIRED; 2785 newvmspace = vmspace_alloc(minuser, maxuser); 2786 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 2787 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy); 2788 /* 2789 * This code is written like this for prototype purposes. The 2790 * goal is to avoid running down the vmspace here, but let the 2791 * other process's that are still using the vmspace to finally 2792 * run it down. Even though there is little or no chance of blocking 2793 * here, it is a good idea to keep this form for future mods. 2794 */ 2795 p->p_vmspace = newvmspace; 2796 pmap_pinit2(vmspace_pmap(newvmspace)); 2797 vmspace_free(oldvmspace); 2798 if (p == curthread->td_proc) /* XXXKSE ? */ 2799 pmap_activate(curthread); 2800 } 2801 2802 /* 2803 * Unshare the specified VM space for forcing COW. This 2804 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2805 */ 2806 void 2807 vmspace_unshare(struct proc *p) 2808 { 2809 struct vmspace *oldvmspace = p->p_vmspace; 2810 struct vmspace *newvmspace; 2811 2812 GIANT_REQUIRED; 2813 if (oldvmspace->vm_refcnt == 1) 2814 return; 2815 newvmspace = vmspace_fork(oldvmspace); 2816 p->p_vmspace = newvmspace; 2817 pmap_pinit2(vmspace_pmap(newvmspace)); 2818 vmspace_free(oldvmspace); 2819 if (p == curthread->td_proc) /* XXXKSE ? */ 2820 pmap_activate(curthread); 2821 } 2822 2823 /* 2824 * vm_map_lookup: 2825 * 2826 * Finds the VM object, offset, and 2827 * protection for a given virtual address in the 2828 * specified map, assuming a page fault of the 2829 * type specified. 2830 * 2831 * Leaves the map in question locked for read; return 2832 * values are guaranteed until a vm_map_lookup_done 2833 * call is performed. Note that the map argument 2834 * is in/out; the returned map must be used in 2835 * the call to vm_map_lookup_done. 2836 * 2837 * A handle (out_entry) is returned for use in 2838 * vm_map_lookup_done, to make that fast. 
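 *
 * A minimal usage sketch (hypothetical caller; declarations and error
 * handling are omitted):
 *
 *	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (result == KERN_SUCCESS) {
 *		... use object and pindex while the map stays read-locked ...
 *		vm_map_lookup_done(map, entry);
 *	}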
2839 * 2840 * If a lookup is requested with "write protection" 2841 * specified, the map may be changed to perform virtual 2842 * copying operations, although the data referenced will 2843 * remain the same. 2844 */ 2845 int 2846 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2847 vm_offset_t vaddr, 2848 vm_prot_t fault_typea, 2849 vm_map_entry_t *out_entry, /* OUT */ 2850 vm_object_t *object, /* OUT */ 2851 vm_pindex_t *pindex, /* OUT */ 2852 vm_prot_t *out_prot, /* OUT */ 2853 boolean_t *wired) /* OUT */ 2854 { 2855 vm_map_entry_t entry; 2856 vm_map_t map = *var_map; 2857 vm_prot_t prot; 2858 vm_prot_t fault_type = fault_typea; 2859 2860 RetryLookup:; 2861 /* 2862 * Lookup the faulting address. 2863 */ 2864 2865 vm_map_lock_read(map); 2866 #define RETURN(why) \ 2867 { \ 2868 vm_map_unlock_read(map); \ 2869 return (why); \ 2870 } 2871 2872 /* 2873 * If the map has an interesting hint, try it before calling full 2874 * blown lookup routine. 2875 */ 2876 entry = map->root; 2877 *out_entry = entry; 2878 if (entry == NULL || 2879 (vaddr < entry->start) || (vaddr >= entry->end)) { 2880 /* 2881 * Entry was either not a valid hint, or the vaddr was not 2882 * contained in the entry, so do a full lookup. 2883 */ 2884 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 2885 RETURN(KERN_INVALID_ADDRESS); 2886 2887 entry = *out_entry; 2888 } 2889 2890 /* 2891 * Handle submaps. 2892 */ 2893 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2894 vm_map_t old_map = map; 2895 2896 *var_map = map = entry->object.sub_map; 2897 vm_map_unlock_read(old_map); 2898 goto RetryLookup; 2899 } 2900 2901 /* 2902 * Check whether this task is allowed to have this page. 2903 * Note the special case for MAP_ENTRY_COW 2904 * pages with an override. This is to implement a forced 2905 * COW for debuggers. 2906 */ 2907 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2908 prot = entry->max_protection; 2909 else 2910 prot = entry->protection; 2911 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2912 if ((fault_type & prot) != fault_type) { 2913 RETURN(KERN_PROTECTION_FAILURE); 2914 } 2915 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2916 (entry->eflags & MAP_ENTRY_COW) && 2917 (fault_type & VM_PROT_WRITE) && 2918 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2919 RETURN(KERN_PROTECTION_FAILURE); 2920 } 2921 2922 /* 2923 * If this page is not pageable, we have to get it for all possible 2924 * accesses. 2925 */ 2926 *wired = (entry->wired_count != 0); 2927 if (*wired) 2928 prot = fault_type = entry->protection; 2929 2930 /* 2931 * If the entry was copy-on-write, we either ... 2932 */ 2933 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2934 /* 2935 * If we want to write the page, we may as well handle that 2936 * now since we've got the map locked. 2937 * 2938 * If we don't need to write the page, we just demote the 2939 * permissions allowed. 2940 */ 2941 if (fault_type & VM_PROT_WRITE) { 2942 /* 2943 * Make a new object, and place it in the object 2944 * chain. Note that no new references have appeared 2945 * -- one just moved from the map to the new 2946 * object. 2947 */ 2948 if (vm_map_lock_upgrade(map)) 2949 goto RetryLookup; 2950 2951 vm_object_shadow( 2952 &entry->object.vm_object, 2953 &entry->offset, 2954 atop(entry->end - entry->start)); 2955 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2956 2957 vm_map_lock_downgrade(map); 2958 } else { 2959 /* 2960 * We're attempting to read a copy-on-write page -- 2961 * don't allow writes. 2962 */ 2963 prot &= ~VM_PROT_WRITE; 2964 } 2965 } 2966 2967 /* 2968 * Create an object if necessary. 
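	 * An entry in a non-system map that still has no backing
	 * object is given a default (anonymous memory) object here;
	 * system maps are left untouched.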
2969 */ 2970 if (entry->object.vm_object == NULL && 2971 !map->system_map) { 2972 if (vm_map_lock_upgrade(map)) 2973 goto RetryLookup; 2974 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 2975 atop(entry->end - entry->start)); 2976 entry->offset = 0; 2977 vm_map_lock_downgrade(map); 2978 } 2979 2980 /* 2981 * Return the object/offset from this entry. If the entry was 2982 * copy-on-write or empty, it has been fixed up. 2983 */ 2984 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 2985 *object = entry->object.vm_object; 2986 2987 /* 2988 * Return whether this is the only map sharing this data. 2989 */ 2990 *out_prot = prot; 2991 return (KERN_SUCCESS); 2992 2993 #undef RETURN 2994 } 2995 2996 /* 2997 * vm_map_lookup_done: 2998 * 2999 * Releases locks acquired by a vm_map_lookup 3000 * (according to the handle returned by that lookup). 3001 */ 3002 void 3003 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 3004 { 3005 /* 3006 * Unlock the main-level map 3007 */ 3008 vm_map_unlock_read(map); 3009 } 3010 3011 #include "opt_ddb.h" 3012 #ifdef DDB 3013 #include <sys/kernel.h> 3014 3015 #include <ddb/ddb.h> 3016 3017 /* 3018 * vm_map_print: [ debug ] 3019 */ 3020 DB_SHOW_COMMAND(map, vm_map_print) 3021 { 3022 static int nlines; 3023 /* XXX convert args. */ 3024 vm_map_t map = (vm_map_t)addr; 3025 boolean_t full = have_addr; 3026 3027 vm_map_entry_t entry; 3028 3029 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3030 (void *)map, 3031 (void *)map->pmap, map->nentries, map->timestamp); 3032 nlines++; 3033 3034 if (!full && db_indent) 3035 return; 3036 3037 db_indent += 2; 3038 for (entry = map->header.next; entry != &map->header; 3039 entry = entry->next) { 3040 db_iprintf("map entry %p: start=%p, end=%p\n", 3041 (void *)entry, (void *)entry->start, (void *)entry->end); 3042 nlines++; 3043 { 3044 static char *inheritance_name[4] = 3045 {"share", "copy", "none", "donate_copy"}; 3046 3047 db_iprintf(" prot=%x/%x/%s", 3048 entry->protection, 3049 entry->max_protection, 3050 inheritance_name[(int)(unsigned char)entry->inheritance]); 3051 if (entry->wired_count != 0) 3052 db_printf(", wired"); 3053 } 3054 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3055 db_printf(", share=%p, offset=0x%jx\n", 3056 (void *)entry->object.sub_map, 3057 (uintmax_t)entry->offset); 3058 nlines++; 3059 if ((entry->prev == &map->header) || 3060 (entry->prev->object.sub_map != 3061 entry->object.sub_map)) { 3062 db_indent += 2; 3063 vm_map_print((db_expr_t)(intptr_t) 3064 entry->object.sub_map, 3065 full, 0, (char *)0); 3066 db_indent -= 2; 3067 } 3068 } else { 3069 db_printf(", object=%p, offset=0x%jx", 3070 (void *)entry->object.vm_object, 3071 (uintmax_t)entry->offset); 3072 if (entry->eflags & MAP_ENTRY_COW) 3073 db_printf(", copy (%s)", 3074 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? 
"needed" : "done"); 3075 db_printf("\n"); 3076 nlines++; 3077 3078 if ((entry->prev == &map->header) || 3079 (entry->prev->object.vm_object != 3080 entry->object.vm_object)) { 3081 db_indent += 2; 3082 vm_object_print((db_expr_t)(intptr_t) 3083 entry->object.vm_object, 3084 full, 0, (char *)0); 3085 nlines += 4; 3086 db_indent -= 2; 3087 } 3088 } 3089 } 3090 db_indent -= 2; 3091 if (db_indent == 0) 3092 nlines = 0; 3093 } 3094 3095 3096 DB_SHOW_COMMAND(procvm, procvm) 3097 { 3098 struct proc *p; 3099 3100 if (have_addr) { 3101 p = (struct proc *) addr; 3102 } else { 3103 p = curproc; 3104 } 3105 3106 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3107 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3108 (void *)vmspace_pmap(p->p_vmspace)); 3109 3110 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3111 } 3112 3113 #endif /* DDB */ 3114