1 /* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 33 * 34 * 35 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 39 * 40 * Permission to use, copy, modify and distribute this software and 41 * its documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 49 * 50 * Carnegie Mellon requests users of this software to return to 51 * 52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 53 * School of Computer Science 54 * Carnegie Mellon University 55 * Pittsburgh PA 15213-3890 56 * 57 * any improvements or extensions that they make and grant Carnegie the 58 * rights to redistribute these changes. 59 */ 60 61 /* 62 * Virtual memory mapping module. 
63 */ 64 65 #include <sys/cdefs.h> 66 __FBSDID("$FreeBSD$"); 67 68 #include <sys/param.h> 69 #include <sys/systm.h> 70 #include <sys/ktr.h> 71 #include <sys/lock.h> 72 #include <sys/mutex.h> 73 #include <sys/proc.h> 74 #include <sys/vmmeter.h> 75 #include <sys/mman.h> 76 #include <sys/vnode.h> 77 #include <sys/resourcevar.h> 78 #include <sys/file.h> 79 #include <sys/sysent.h> 80 #include <sys/shm.h> 81 82 #include <vm/vm.h> 83 #include <vm/vm_param.h> 84 #include <vm/pmap.h> 85 #include <vm/vm_map.h> 86 #include <vm/vm_page.h> 87 #include <vm/vm_object.h> 88 #include <vm/vm_pager.h> 89 #include <vm/vm_kern.h> 90 #include <vm/vm_extern.h> 91 #include <vm/swap_pager.h> 92 #include <vm/uma.h> 93 94 /* 95 * Virtual memory maps provide for the mapping, protection, 96 * and sharing of virtual memory objects. In addition, 97 * this module provides for an efficient virtual copy of 98 * memory from one map to another. 99 * 100 * Synchronization is required prior to most operations. 101 * 102 * Maps consist of an ordered doubly-linked list of simple 103 * entries; a single hint is used to speed up lookups. 104 * 105 * Since portions of maps are specified by start/end addresses, 106 * which may not align with existing map entries, all 107 * routines merely "clip" entries to these start/end values. 108 * [That is, an entry is split into two, bordering at a 109 * start or end value.] Note that these clippings may not 110 * always be necessary (as the two resulting entries are then 111 * not changed); however, the clipping is done for convenience. 112 * 113 * As mentioned above, virtual copy operations are performed 114 * by copying VM object references from one map to 115 * another, and then marking both regions as copy-on-write. 116 */ 117 118 /* 119 * vm_map_startup: 120 * 121 * Initialize the vm_map module. Must be called before 122 * any other vm_map routines. 123 * 124 * Map and entry structures are allocated from the general 125 * purpose memory pool with some exceptions: 126 * 127 * - The kernel map and kmem submap are allocated statically. 128 * - Kernel map entries are allocated out of a static pool. 129 * 130 * These restrictions are necessary since malloc() uses the 131 * maps and requires map entries. 
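 *
 * (Put another way: allocating a kernel map entry through the
 * general-purpose pool could recurse, since malloc() may itself have
 * to grow a kernel submap and would then need a fresh map entry.
 * The preallocated, statically backed kmapentzone set up below is
 * what breaks that cycle.)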
132 */ 133 134 static struct mtx map_sleep_mtx; 135 static uma_zone_t mapentzone; 136 static uma_zone_t kmapentzone; 137 static uma_zone_t mapzone; 138 static uma_zone_t vmspace_zone; 139 static struct vm_object kmapentobj; 140 static void vmspace_zinit(void *mem, int size); 141 static void vmspace_zfini(void *mem, int size); 142 static void vm_map_zinit(void *mem, int size); 143 static void vm_map_zfini(void *mem, int size); 144 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max); 145 146 #ifdef INVARIANTS 147 static void vm_map_zdtor(void *mem, int size, void *arg); 148 static void vmspace_zdtor(void *mem, int size, void *arg); 149 #endif 150 151 void 152 vm_map_startup(void) 153 { 154 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); 155 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, 156 #ifdef INVARIANTS 157 vm_map_zdtor, 158 #else 159 NULL, 160 #endif 161 vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 162 uma_prealloc(mapzone, MAX_KMAP); 163 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 164 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 165 UMA_ZONE_MTXCLASS | UMA_ZONE_VM); 166 uma_prealloc(kmapentzone, MAX_KMAPENT); 167 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 168 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 169 uma_prealloc(mapentzone, MAX_MAPENT); 170 } 171 172 static void 173 vmspace_zfini(void *mem, int size) 174 { 175 struct vmspace *vm; 176 177 vm = (struct vmspace *)mem; 178 pmap_release(vmspace_pmap(vm)); 179 vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map)); 180 } 181 182 static void 183 vmspace_zinit(void *mem, int size) 184 { 185 struct vmspace *vm; 186 187 vm = (struct vmspace *)mem; 188 189 vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map)); 190 pmap_pinit(vmspace_pmap(vm)); 191 } 192 193 static void 194 vm_map_zfini(void *mem, int size) 195 { 196 vm_map_t map; 197 198 map = (vm_map_t)mem; 199 mtx_destroy(&map->system_mtx); 200 sx_destroy(&map->lock); 201 } 202 203 static void 204 vm_map_zinit(void *mem, int size) 205 { 206 vm_map_t map; 207 208 map = (vm_map_t)mem; 209 map->nentries = 0; 210 map->size = 0; 211 map->infork = 0; 212 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 213 sx_init(&map->lock, "user map"); 214 } 215 216 #ifdef INVARIANTS 217 static void 218 vmspace_zdtor(void *mem, int size, void *arg) 219 { 220 struct vmspace *vm; 221 222 vm = (struct vmspace *)mem; 223 224 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); 225 } 226 static void 227 vm_map_zdtor(void *mem, int size, void *arg) 228 { 229 vm_map_t map; 230 231 map = (vm_map_t)mem; 232 KASSERT(map->nentries == 0, 233 ("map %p nentries == %d on free.", 234 map, map->nentries)); 235 KASSERT(map->size == 0, 236 ("map %p size == %lu on free.", 237 map, (unsigned long)map->size)); 238 KASSERT(map->infork == 0, 239 ("map %p infork == %d on free.", 240 map, map->infork)); 241 } 242 #endif /* INVARIANTS */ 243 244 /* 245 * Allocate a vmspace structure, including a vm_map and pmap, 246 * and initialize those structures. The refcnt is set to 1. 
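 *
 * A hedged caller sketch (illustrative only; the real callers are the
 * exec/fork paths, and "p2" is a hypothetical new process):
 *
 *        struct vmspace *vm;
 *
 *        vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *        p2->p_vmspace = vm;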
247 */ 248 struct vmspace * 249 vmspace_alloc(min, max) 250 vm_offset_t min, max; 251 { 252 struct vmspace *vm; 253 254 vm = uma_zalloc(vmspace_zone, M_WAITOK); 255 CTR1(KTR_VM, "vmspace_alloc: %p", vm); 256 _vm_map_init(&vm->vm_map, min, max); 257 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 258 vm->vm_refcnt = 1; 259 vm->vm_shm = NULL; 260 vm->vm_swrss = 0; 261 vm->vm_tsize = 0; 262 vm->vm_dsize = 0; 263 vm->vm_ssize = 0; 264 vm->vm_taddr = 0; 265 vm->vm_daddr = 0; 266 vm->vm_maxsaddr = 0; 267 vm->vm_exitingcnt = 0; 268 return (vm); 269 } 270 271 void 272 vm_init2(void) 273 { 274 uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count, 275 (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 + 276 maxproc * 2 + maxfiles); 277 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, 278 #ifdef INVARIANTS 279 vmspace_zdtor, 280 #else 281 NULL, 282 #endif 283 vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 284 pmap_init2(); 285 } 286 287 static __inline void 288 vmspace_dofree(struct vmspace *vm) 289 { 290 CTR1(KTR_VM, "vmspace_free: %p", vm); 291 292 /* 293 * Make sure any SysV shm is freed, it might not have been in 294 * exit1(). 295 */ 296 shmexit(vm); 297 298 /* 299 * Lock the map, to wait out all other references to it. 300 * Delete all of the mappings and pages they hold, then call 301 * the pmap module to reclaim anything left. 302 */ 303 vm_map_lock(&vm->vm_map); 304 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 305 vm->vm_map.max_offset); 306 vm_map_unlock(&vm->vm_map); 307 308 uma_zfree(vmspace_zone, vm); 309 } 310 311 void 312 vmspace_free(struct vmspace *vm) 313 { 314 int refcnt; 315 316 if (vm->vm_refcnt == 0) 317 panic("vmspace_free: attempt to free already freed vmspace"); 318 319 do 320 refcnt = vm->vm_refcnt; 321 while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); 322 if (refcnt == 1 && vm->vm_exitingcnt == 0) 323 vmspace_dofree(vm); 324 } 325 326 void 327 vmspace_exitfree(struct proc *p) 328 { 329 struct vmspace *vm; 330 int exitingcnt; 331 332 vm = p->p_vmspace; 333 p->p_vmspace = NULL; 334 335 /* 336 * cleanup by parent process wait()ing on exiting child. vm_refcnt 337 * may not be 0 (e.g. fork() and child exits without exec()ing). 338 * exitingcnt may increment above 0 and drop back down to zero 339 * several times while vm_refcnt is held non-zero. vm_refcnt 340 * may also increment above 0 and drop back down to zero several 341 * times while vm_exitingcnt is held non-zero. 342 * 343 * The last wait on the exiting child's vmspace will clean up 344 * the remainder of the vmspace. 
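 *
 * The loop below is a lock-free decrement: vm_exitingcnt is re-read
 * and atomic_cmpset_int() retried until it succeeds in storing
 * (exitingcnt - 1).  Only the caller that observed exitingcnt == 1,
 * with vm_refcnt already zero, goes on to free the vmspace.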
345 */ 346 do 347 exitingcnt = vm->vm_exitingcnt; 348 while (!atomic_cmpset_int(&vm->vm_exitingcnt, exitingcnt, 349 exitingcnt - 1)); 350 if (vm->vm_refcnt == 0 && exitingcnt == 1) 351 vmspace_dofree(vm); 352 } 353 354 void 355 _vm_map_lock(vm_map_t map, const char *file, int line) 356 { 357 358 if (map->system_map) 359 _mtx_lock_flags(&map->system_mtx, 0, file, line); 360 else 361 _sx_xlock(&map->lock, file, line); 362 map->timestamp++; 363 } 364 365 void 366 _vm_map_unlock(vm_map_t map, const char *file, int line) 367 { 368 369 if (map->system_map) 370 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 371 else 372 _sx_xunlock(&map->lock, file, line); 373 } 374 375 void 376 _vm_map_lock_read(vm_map_t map, const char *file, int line) 377 { 378 379 if (map->system_map) 380 _mtx_lock_flags(&map->system_mtx, 0, file, line); 381 else 382 _sx_xlock(&map->lock, file, line); 383 } 384 385 void 386 _vm_map_unlock_read(vm_map_t map, const char *file, int line) 387 { 388 389 if (map->system_map) 390 _mtx_unlock_flags(&map->system_mtx, 0, file, line); 391 else 392 _sx_xunlock(&map->lock, file, line); 393 } 394 395 int 396 _vm_map_trylock(vm_map_t map, const char *file, int line) 397 { 398 int error; 399 400 error = map->system_map ? 401 !_mtx_trylock(&map->system_mtx, 0, file, line) : 402 !_sx_try_xlock(&map->lock, file, line); 403 if (error == 0) 404 map->timestamp++; 405 return (error == 0); 406 } 407 408 int 409 _vm_map_trylock_read(vm_map_t map, const char *file, int line) 410 { 411 int error; 412 413 error = map->system_map ? 414 !_mtx_trylock(&map->system_mtx, 0, file, line) : 415 !_sx_try_xlock(&map->lock, file, line); 416 return (error == 0); 417 } 418 419 int 420 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) 421 { 422 423 #ifdef INVARIANTS 424 if (map->system_map) { 425 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 426 } else 427 _sx_assert(&map->lock, SX_XLOCKED, file, line); 428 #endif 429 map->timestamp++; 430 return (0); 431 } 432 433 void 434 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) 435 { 436 437 #ifdef INVARIANTS 438 if (map->system_map) { 439 _mtx_assert(&map->system_mtx, MA_OWNED, file, line); 440 } else 441 _sx_assert(&map->lock, SX_XLOCKED, file, line); 442 #endif 443 } 444 445 /* 446 * vm_map_unlock_and_wait: 447 */ 448 int 449 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait) 450 { 451 452 mtx_lock(&map_sleep_mtx); 453 vm_map_unlock(map); 454 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0)); 455 } 456 457 /* 458 * vm_map_wakeup: 459 */ 460 void 461 vm_map_wakeup(vm_map_t map) 462 { 463 464 /* 465 * Acquire and release map_sleep_mtx to prevent a wakeup() 466 * from being performed (and lost) between the vm_map_unlock() 467 * and the msleep() in vm_map_unlock_and_wait(). 468 */ 469 mtx_lock(&map_sleep_mtx); 470 mtx_unlock(&map_sleep_mtx); 471 wakeup(&map->root); 472 } 473 474 long 475 vmspace_resident_count(struct vmspace *vmspace) 476 { 477 return pmap_resident_count(vmspace_pmap(vmspace)); 478 } 479 480 long 481 vmspace_wired_count(struct vmspace *vmspace) 482 { 483 return pmap_wired_count(vmspace_pmap(vmspace)); 484 } 485 486 /* 487 * vm_map_create: 488 * 489 * Creates and returns a new empty VM map with 490 * the given physical map structure, and having 491 * the given lower and upper address bounds. 
492 */ 493 vm_map_t 494 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) 495 { 496 vm_map_t result; 497 498 result = uma_zalloc(mapzone, M_WAITOK); 499 CTR1(KTR_VM, "vm_map_create: %p", result); 500 _vm_map_init(result, min, max); 501 result->pmap = pmap; 502 return (result); 503 } 504 505 /* 506 * Initialize an existing vm_map structure 507 * such as that in the vmspace structure. 508 * The pmap is set elsewhere. 509 */ 510 static void 511 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 512 { 513 514 map->header.next = map->header.prev = &map->header; 515 map->needs_wakeup = FALSE; 516 map->system_map = 0; 517 map->min_offset = min; 518 map->max_offset = max; 519 map->first_free = &map->header; 520 map->flags = 0; 521 map->root = NULL; 522 map->timestamp = 0; 523 } 524 525 void 526 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max) 527 { 528 _vm_map_init(map, min, max); 529 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); 530 sx_init(&map->lock, "user map"); 531 } 532 533 /* 534 * vm_map_entry_dispose: [ internal use only ] 535 * 536 * Inverse of vm_map_entry_create. 537 */ 538 static void 539 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) 540 { 541 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); 542 } 543 544 /* 545 * vm_map_entry_create: [ internal use only ] 546 * 547 * Allocates a VM map entry for insertion. 548 * No entry fields are filled in. 549 */ 550 static vm_map_entry_t 551 vm_map_entry_create(vm_map_t map) 552 { 553 vm_map_entry_t new_entry; 554 555 if (map->system_map) 556 new_entry = uma_zalloc(kmapentzone, M_NOWAIT); 557 else 558 new_entry = uma_zalloc(mapentzone, M_WAITOK); 559 if (new_entry == NULL) 560 panic("vm_map_entry_create: kernel resources exhausted"); 561 return (new_entry); 562 } 563 564 /* 565 * vm_map_entry_set_behavior: 566 * 567 * Set the expected access behavior, either normal, random, or 568 * sequential. 569 */ 570 static __inline void 571 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) 572 { 573 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | 574 (behavior & MAP_ENTRY_BEHAV_MASK); 575 } 576 577 /* 578 * vm_map_entry_splay: 579 * 580 * Implements Sleator and Tarjan's top-down splay algorithm. Returns 581 * the vm_map_entry containing the given address. If, however, that 582 * address is not found in the vm_map, returns a vm_map_entry that is 583 * adjacent to the address, coming before or after it. 584 */ 585 static vm_map_entry_t 586 vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root) 587 { 588 struct vm_map_entry dummy; 589 vm_map_entry_t lefttreemax, righttreemin, y; 590 591 if (root == NULL) 592 return (root); 593 lefttreemax = righttreemin = &dummy; 594 for (;; root = y) { 595 if (address < root->start) { 596 if ((y = root->left) == NULL) 597 break; 598 if (address < y->start) { 599 /* Rotate right. */ 600 root->left = y->right; 601 y->right = root; 602 root = y; 603 if ((y = root->left) == NULL) 604 break; 605 } 606 /* Link into the new root's right tree. */ 607 righttreemin->left = root; 608 righttreemin = root; 609 } else if (address >= root->end) { 610 if ((y = root->right) == NULL) 611 break; 612 if (address >= y->end) { 613 /* Rotate left. */ 614 root->right = y->left; 615 y->left = root; 616 root = y; 617 if ((y = root->right) == NULL) 618 break; 619 } 620 /* Link into the new root's left tree. */ 621 lefttreemax->right = root; 622 lefttreemax = root; 623 } else 624 break; 625 } 626 /* Assemble the new root. 
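 * The entries that precede the final root were accumulated on
 * dummy.right (through lefttreemax), and the entries that follow it
 * on dummy.left (through righttreemin).  The root's own children are
 * appended to those two trees, which then become the new root's left
 * and right subtrees.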
*/ 627 lefttreemax->right = root->left; 628 righttreemin->left = root->right; 629 root->left = dummy.right; 630 root->right = dummy.left; 631 return (root); 632 } 633 634 /* 635 * vm_map_entry_{un,}link: 636 * 637 * Insert/remove entries from maps. 638 */ 639 static void 640 vm_map_entry_link(vm_map_t map, 641 vm_map_entry_t after_where, 642 vm_map_entry_t entry) 643 { 644 645 CTR4(KTR_VM, 646 "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, 647 map->nentries, entry, after_where); 648 map->nentries++; 649 entry->prev = after_where; 650 entry->next = after_where->next; 651 entry->next->prev = entry; 652 after_where->next = entry; 653 654 if (after_where != &map->header) { 655 if (after_where != map->root) 656 vm_map_entry_splay(after_where->start, map->root); 657 entry->right = after_where->right; 658 entry->left = after_where; 659 after_where->right = NULL; 660 } else { 661 entry->right = map->root; 662 entry->left = NULL; 663 } 664 map->root = entry; 665 } 666 667 static void 668 vm_map_entry_unlink(vm_map_t map, 669 vm_map_entry_t entry) 670 { 671 vm_map_entry_t next, prev, root; 672 673 if (entry != map->root) 674 vm_map_entry_splay(entry->start, map->root); 675 if (entry->left == NULL) 676 root = entry->right; 677 else { 678 root = vm_map_entry_splay(entry->start, entry->left); 679 root->right = entry->right; 680 } 681 map->root = root; 682 683 prev = entry->prev; 684 next = entry->next; 685 next->prev = prev; 686 prev->next = next; 687 map->nentries--; 688 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, 689 map->nentries, entry); 690 } 691 692 /* 693 * vm_map_lookup_entry: [ internal use only ] 694 * 695 * Finds the map entry containing (or 696 * immediately preceding) the specified address 697 * in the given map; the entry is returned 698 * in the "entry" parameter. The boolean 699 * result indicates whether the address is 700 * actually contained in the map. 701 */ 702 boolean_t 703 vm_map_lookup_entry( 704 vm_map_t map, 705 vm_offset_t address, 706 vm_map_entry_t *entry) /* OUT */ 707 { 708 vm_map_entry_t cur; 709 710 cur = vm_map_entry_splay(address, map->root); 711 if (cur == NULL) 712 *entry = &map->header; 713 else { 714 map->root = cur; 715 716 if (address >= cur->start) { 717 *entry = cur; 718 if (cur->end > address) 719 return (TRUE); 720 } else 721 *entry = cur->prev; 722 } 723 return (FALSE); 724 } 725 726 /* 727 * vm_map_insert: 728 * 729 * Inserts the given whole VM object into the target 730 * map at the specified address range. The object's 731 * size should match that of the address range. 732 * 733 * Requires that the map be locked, and leaves it so. 734 * 735 * If object is non-NULL, ref count must be bumped by caller 736 * prior to making call to account for the new entry. 737 */ 738 int 739 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 740 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, 741 int cow) 742 { 743 vm_map_entry_t new_entry; 744 vm_map_entry_t prev_entry; 745 vm_map_entry_t temp_entry; 746 vm_eflags_t protoeflags; 747 748 /* 749 * Check that the start and end points are not bogus. 750 */ 751 if ((start < map->min_offset) || (end > map->max_offset) || 752 (start >= end)) 753 return (KERN_INVALID_ADDRESS); 754 755 /* 756 * Find the entry prior to the proposed starting address; if it's part 757 * of an existing entry, this range is bogus. 
758 */ 759 if (vm_map_lookup_entry(map, start, &temp_entry)) 760 return (KERN_NO_SPACE); 761 762 prev_entry = temp_entry; 763 764 /* 765 * Assert that the next entry doesn't overlap the end point. 766 */ 767 if ((prev_entry->next != &map->header) && 768 (prev_entry->next->start < end)) 769 return (KERN_NO_SPACE); 770 771 protoeflags = 0; 772 773 if (cow & MAP_COPY_ON_WRITE) 774 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 775 776 if (cow & MAP_NOFAULT) { 777 protoeflags |= MAP_ENTRY_NOFAULT; 778 779 KASSERT(object == NULL, 780 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 781 } 782 if (cow & MAP_DISABLE_SYNCER) 783 protoeflags |= MAP_ENTRY_NOSYNC; 784 if (cow & MAP_DISABLE_COREDUMP) 785 protoeflags |= MAP_ENTRY_NOCOREDUMP; 786 787 if (object != NULL) { 788 /* 789 * OBJ_ONEMAPPING must be cleared unless this mapping 790 * is trivially proven to be the only mapping for any 791 * of the object's pages. (Object granularity 792 * reference counting is insufficient to recognize 793 * aliases with precision.) 794 */ 795 VM_OBJECT_LOCK(object); 796 if (object->ref_count > 1 || object->shadow_count != 0) 797 vm_object_clear_flag(object, OBJ_ONEMAPPING); 798 VM_OBJECT_UNLOCK(object); 799 } 800 else if ((prev_entry != &map->header) && 801 (prev_entry->eflags == protoeflags) && 802 (prev_entry->end == start) && 803 (prev_entry->wired_count == 0) && 804 ((prev_entry->object.vm_object == NULL) || 805 vm_object_coalesce(prev_entry->object.vm_object, 806 prev_entry->offset, 807 (vm_size_t)(prev_entry->end - prev_entry->start), 808 (vm_size_t)(end - prev_entry->end)))) { 809 /* 810 * We were able to extend the object. Determine if we 811 * can extend the previous map entry to include the 812 * new range as well. 813 */ 814 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 815 (prev_entry->protection == prot) && 816 (prev_entry->max_protection == max)) { 817 map->size += (end - prev_entry->end); 818 prev_entry->end = end; 819 vm_map_simplify_entry(map, prev_entry); 820 return (KERN_SUCCESS); 821 } 822 823 /* 824 * If we can extend the object but cannot extend the 825 * map entry, we have to create a new map entry. We 826 * must bump the ref count on the extended object to 827 * account for it. object may be NULL. 828 */ 829 object = prev_entry->object.vm_object; 830 offset = prev_entry->offset + 831 (prev_entry->end - prev_entry->start); 832 vm_object_reference(object); 833 } 834 835 /* 836 * NOTE: if conditionals fail, object can be NULL here. This occurs 837 * in things like the buffer map where we manage kva but do not manage 838 * backing objects. 839 */ 840 841 /* 842 * Create a new entry 843 */ 844 new_entry = vm_map_entry_create(map); 845 new_entry->start = start; 846 new_entry->end = end; 847 848 new_entry->eflags = protoeflags; 849 new_entry->object.vm_object = object; 850 new_entry->offset = offset; 851 new_entry->avail_ssize = 0; 852 853 new_entry->inheritance = VM_INHERIT_DEFAULT; 854 new_entry->protection = prot; 855 new_entry->max_protection = max; 856 new_entry->wired_count = 0; 857 858 /* 859 * Insert the new entry into the list 860 */ 861 vm_map_entry_link(map, prev_entry, new_entry); 862 map->size += new_entry->end - new_entry->start; 863 864 /* 865 * Update the free space hint 866 */ 867 if ((map->first_free == prev_entry) && 868 (prev_entry->end >= new_entry->start)) { 869 map->first_free = new_entry; 870 } 871 872 #if 0 873 /* 874 * Temporarily removed to avoid MAP_STACK panic, due to 875 * MAP_STACK being a huge hack. 
Will be added back in 876 * when MAP_STACK (and the user stack mapping) is fixed. 877 */ 878 /* 879 * It may be possible to simplify the entry 880 */ 881 vm_map_simplify_entry(map, new_entry); 882 #endif 883 884 if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { 885 vm_map_pmap_enter(map, start, prot, 886 object, OFF_TO_IDX(offset), end - start, 887 cow & MAP_PREFAULT_PARTIAL); 888 } 889 890 return (KERN_SUCCESS); 891 } 892 893 /* 894 * Find sufficient space for `length' bytes in the given map, starting at 895 * `start'. The map must be locked. Returns 0 on success, 1 on no space. 896 */ 897 int 898 vm_map_findspace( 899 vm_map_t map, 900 vm_offset_t start, 901 vm_size_t length, 902 vm_offset_t *addr) 903 { 904 vm_map_entry_t entry, next; 905 vm_offset_t end; 906 907 if (start < map->min_offset) 908 start = map->min_offset; 909 if (start > map->max_offset) 910 return (1); 911 912 /* 913 * Look for the first possible address; if there's already something 914 * at this address, we have to start after it. 915 */ 916 if (start == map->min_offset) { 917 if ((entry = map->first_free) != &map->header) 918 start = entry->end; 919 } else { 920 vm_map_entry_t tmp; 921 922 if (vm_map_lookup_entry(map, start, &tmp)) 923 start = tmp->end; 924 entry = tmp; 925 } 926 927 /* 928 * Look through the rest of the map, trying to fit a new region in the 929 * gap between existing regions, or after the very last region. 930 */ 931 for (;; start = (entry = next)->end) { 932 /* 933 * Find the end of the proposed new region. Be sure we didn't 934 * go beyond the end of the map, or wrap around the address; 935 * if so, we lose. Otherwise, if this is the last entry, or 936 * if the proposed new region fits before the next entry, we 937 * win. 938 */ 939 end = start + length; 940 if (end > map->max_offset || end < start) 941 return (1); 942 next = entry->next; 943 if (next == &map->header || next->start >= end) 944 break; 945 } 946 *addr = start; 947 if (map == kernel_map) { 948 vm_offset_t ksize; 949 if ((ksize = round_page(start + length)) > kernel_vm_end) { 950 pmap_growkernel(ksize); 951 } 952 } 953 return (0); 954 } 955 956 /* 957 * vm_map_find finds an unallocated region in the target address 958 * map with the given length. The search is defined to be 959 * first-fit from the specified address; the region found is 960 * returned in the same parameter. 961 * 962 * If object is non-NULL, ref count must be bumped by caller 963 * prior to making call to account for the new entry. 964 */ 965 int 966 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 967 vm_offset_t *addr, /* IN/OUT */ 968 vm_size_t length, boolean_t find_space, vm_prot_t prot, 969 vm_prot_t max, int cow) 970 { 971 vm_offset_t start; 972 int result, s = 0; 973 974 start = *addr; 975 976 if (map == kmem_map) 977 s = splvm(); 978 979 vm_map_lock(map); 980 if (find_space) { 981 if (vm_map_findspace(map, start, length, addr)) { 982 vm_map_unlock(map); 983 if (map == kmem_map) 984 splx(s); 985 return (KERN_NO_SPACE); 986 } 987 start = *addr; 988 } 989 result = vm_map_insert(map, object, offset, 990 start, start + length, prot, max, cow); 991 vm_map_unlock(map); 992 993 if (map == kmem_map) 994 splx(s); 995 996 return (result); 997 } 998 999 /* 1000 * vm_map_simplify_entry: 1001 * 1002 * Simplify the given map entry by merging with either neighbor. This 1003 * routine also has the ability to merge with both neighbors. 1004 * 1005 * The map must be locked. 
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended). When merging, this routine may delete one or
 * both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
                return;

        prev = entry->prev;
        if (prev != &map->header) {
                prevsize = prev->end - prev->start;
                if ( (prev->end == entry->start) &&
                     (prev->object.vm_object == entry->object.vm_object) &&
                     (!prev->object.vm_object ||
                        (prev->offset + prevsize == entry->offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->wired_count == entry->wired_count)) {
                        if (map->first_free == prev)
                                map->first_free = entry;
                        vm_map_entry_unlink(map, prev);
                        entry->start = prev->start;
                        entry->offset = prev->offset;
                        if (prev->object.vm_object)
                                vm_object_deallocate(prev->object.vm_object);
                        vm_map_entry_dispose(map, prev);
                }
        }

        next = entry->next;
        if (next != &map->header) {
                esize = entry->end - entry->start;
                if ((entry->end == next->start) &&
                    (next->object.vm_object == entry->object.vm_object) &&
                     (!entry->object.vm_object ||
                        (entry->offset + esize == next->offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->wired_count == entry->wired_count)) {
                        if (map->first_free == next)
                                map->first_free = entry;
                        vm_map_entry_unlink(map, next);
                        entry->end = next->end;
                        if (next->object.vm_object)
                                vm_object_deallocate(next->object.vm_object);
                        vm_map_entry_dispose(map, next);
                }
        }
}
/*
 * vm_map_clip_start: [ internal use only ]
 *
 * Asserts that the given entry begins at or after
 * the specified address; if necessary,
 * it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
        if (startaddr > entry->start) \
                _vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
        vm_map_entry_t new_entry;

        /*
         * Split off the front portion -- note that we must insert the new
         * entry BEFORE this one, so that this entry has the specified
         * starting address.
         */
        vm_map_simplify_entry(map, entry);

        /*
         * If there is no object backing this entry, we might as well create
         * one now. If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map. This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                        atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->end = start;
        entry->offset += (start - entry->start);
        entry->start = start;

        vm_map_entry_link(map, entry->prev, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 * vm_map_clip_end: [ internal use only ]
 *
 * Asserts that the given entry ends at or before
 * the specified address; if necessary,
 * it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
        if ((endaddr) < (entry->end)) \
                _vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
        vm_map_entry_t new_entry;

        /*
         * If there is no object backing this entry, we might as well create
         * one now. If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map. This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                        atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        /*
         * Create a new entry and insert it AFTER the specified entry
         */
        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->start = entry->end = end;
        new_entry->offset += (end - entry->start);

        vm_map_entry_link(map, entry, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 * VM_MAP_RANGE_CHECK: [ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
        if (start < vm_map_min(map)) \
                start = vm_map_min(map); \
        if (end > vm_map_max(map)) \
                end = vm_map_max(map); \
        if (start > end) \
                start = end; \
}

/*
 * vm_map_submap: [ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find,
 * and no other operations may have been performed on this
 * range prior to calling vm_map_submap.
 *
 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
 * vm_fault
 * [Don't try vm_map_copy!]
 *
 * To remove a submapping, one must first remove the
 * range from the superior map, and then destroy the
 * submap (if desired). [Better yet, don't try it.]
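 *
 * A hedged sketch of the usual setup sequence (modelled on the
 * kmem_suballoc()-style callers, not copied from this file):
 *
 *        (void) vm_map_find(parent, NULL, 0, &min, size, TRUE,
 *            VM_PROT_ALL, VM_PROT_ALL, 0);
 *        sub = vm_map_create(vm_map_pmap(parent), min, min + size);
 *        (void) vm_map_submap(parent, min, min + size, sub);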
1209 */ 1210 int 1211 vm_map_submap( 1212 vm_map_t map, 1213 vm_offset_t start, 1214 vm_offset_t end, 1215 vm_map_t submap) 1216 { 1217 vm_map_entry_t entry; 1218 int result = KERN_INVALID_ARGUMENT; 1219 1220 vm_map_lock(map); 1221 1222 VM_MAP_RANGE_CHECK(map, start, end); 1223 1224 if (vm_map_lookup_entry(map, start, &entry)) { 1225 vm_map_clip_start(map, entry, start); 1226 } else 1227 entry = entry->next; 1228 1229 vm_map_clip_end(map, entry, end); 1230 1231 if ((entry->start == start) && (entry->end == end) && 1232 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1233 (entry->object.vm_object == NULL)) { 1234 entry->object.sub_map = submap; 1235 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 1236 result = KERN_SUCCESS; 1237 } 1238 vm_map_unlock(map); 1239 1240 return (result); 1241 } 1242 1243 /* 1244 * The maximum number of pages to map 1245 */ 1246 #define MAX_INIT_PT 96 1247 1248 /* 1249 * vm_map_pmap_enter: 1250 * 1251 * Preload read-only mappings for the given object into the specified 1252 * map. This eliminates the soft faults on process startup and 1253 * immediately after an mmap(2). 1254 */ 1255 void 1256 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 1257 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 1258 { 1259 vm_offset_t tmpidx; 1260 int psize; 1261 vm_page_t p, mpte; 1262 1263 if ((prot & VM_PROT_READ) == 0 || object == NULL) 1264 return; 1265 mtx_lock(&Giant); 1266 VM_OBJECT_LOCK(object); 1267 if (object->type == OBJT_DEVICE) { 1268 pmap_object_init_pt(map->pmap, addr, object, pindex, size); 1269 goto unlock_return; 1270 } 1271 1272 psize = atop(size); 1273 1274 if (object->type != OBJT_VNODE || 1275 ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && 1276 (object->resident_page_count > MAX_INIT_PT))) { 1277 goto unlock_return; 1278 } 1279 1280 if (psize + pindex > object->size) { 1281 if (object->size < pindex) 1282 goto unlock_return; 1283 psize = object->size - pindex; 1284 } 1285 1286 mpte = NULL; 1287 1288 if ((p = TAILQ_FIRST(&object->memq)) != NULL) { 1289 if (p->pindex < pindex) { 1290 p = vm_page_splay(pindex, object->root); 1291 if ((object->root = p)->pindex < pindex) 1292 p = TAILQ_NEXT(p, listq); 1293 } 1294 } 1295 /* 1296 * Assert: the variable p is either (1) the page with the 1297 * least pindex greater than or equal to the parameter pindex 1298 * or (2) NULL. 1299 */ 1300 for (; 1301 p != NULL && (tmpidx = p->pindex - pindex) < psize; 1302 p = TAILQ_NEXT(p, listq)) { 1303 /* 1304 * don't allow an madvise to blow away our really 1305 * free pages allocating pv entries. 1306 */ 1307 if ((flags & MAP_PREFAULT_MADVISE) && 1308 cnt.v_free_count < cnt.v_free_reserved) { 1309 break; 1310 } 1311 vm_page_lock_queues(); 1312 if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && 1313 (p->busy == 0) && 1314 (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { 1315 if ((p->queue - p->pc) == PQ_CACHE) 1316 vm_page_deactivate(p); 1317 vm_page_busy(p); 1318 vm_page_unlock_queues(); 1319 VM_OBJECT_UNLOCK(object); 1320 mpte = pmap_enter_quick(map->pmap, 1321 addr + ptoa(tmpidx), p, mpte); 1322 VM_OBJECT_LOCK(object); 1323 vm_page_lock_queues(); 1324 vm_page_wakeup(p); 1325 } 1326 vm_page_unlock_queues(); 1327 } 1328 unlock_return: 1329 VM_OBJECT_UNLOCK(object); 1330 mtx_unlock(&Giant); 1331 } 1332 1333 /* 1334 * vm_map_protect: 1335 * 1336 * Sets the protection of the specified address 1337 * region in the target map. 
If "set_max" is
 * specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
               vm_prot_t new_prot, boolean_t set_max)
{
        vm_map_entry_t current;
        vm_map_entry_t entry;

        vm_map_lock(map);

        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                vm_map_clip_start(map, entry, start);
        } else {
                entry = entry->next;
        }

        /*
         * Make a first pass to check for protection violations.
         */
        current = entry;
        while ((current != &map->header) && (current->start < end)) {
                if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
                        vm_map_unlock(map);
                        return (KERN_INVALID_ARGUMENT);
                }
                if ((new_prot & current->max_protection) != new_prot) {
                        vm_map_unlock(map);
                        return (KERN_PROTECTION_FAILURE);
                }
                current = current->next;
        }

        /*
         * Go back and fix up protections. [Note that clipping is not
         * necessary the second time.]
         */
        current = entry;
        while ((current != &map->header) && (current->start < end)) {
                vm_prot_t old_prot;

                vm_map_clip_end(map, current, end);

                old_prot = current->protection;
                if (set_max)
                        current->protection =
                            (current->max_protection = new_prot) &
                            old_prot;
                else
                        current->protection = new_prot;

                /*
                 * Update physical map if necessary. Worry about copy-on-write
                 * here -- CHECK THIS XXX
                 */
                if (current->protection != old_prot) {
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                        VM_PROT_ALL)
                        pmap_protect(map->pmap, current->start,
                            current->end,
                            current->protection & MASK(current));
#undef MASK
                }
                vm_map_simplify_entry(map, current);
                current = current->next;
        }
        vm_map_unlock(map);
        return (KERN_SUCCESS);
}

/*
 * vm_map_madvise:
 *
 * This routine traverses a process's map handling the madvise
 * system call. Advisories are classified as either those affecting
 * the vm_map_entry structure, or those affecting the underlying
 * objects.
 */
int
vm_map_madvise(
        vm_map_t map,
        vm_offset_t start,
        vm_offset_t end,
        int behav)
{
        vm_map_entry_t current, entry;
        int modify_map = 0;

        /*
         * Some madvise calls directly modify the vm_map_entry, in which case
         * we need to use an exclusive lock on the map and we need to perform
         * various clipping operations. Otherwise we only need a read-lock
         * on the map.
         */
        switch(behav) {
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_NOSYNC:
        case MADV_AUTOSYNC:
        case MADV_NOCORE:
        case MADV_CORE:
                modify_map = 1;
                vm_map_lock(map);
                break;
        case MADV_WILLNEED:
        case MADV_DONTNEED:
        case MADV_FREE:
                vm_map_lock_read(map);
                break;
        default:
                return (KERN_INVALID_ARGUMENT);
        }

        /*
         * Locate starting entry and clip if necessary.
         */
        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                if (modify_map)
                        vm_map_clip_start(map, entry, start);
        } else {
                entry = entry->next;
        }

        if (modify_map) {
                /*
                 * madvise behaviors that are implemented in the vm_map_entry.
1470 * 1471 * We clip the vm_map_entry so that behavioral changes are 1472 * limited to the specified address range. 1473 */ 1474 for (current = entry; 1475 (current != &map->header) && (current->start < end); 1476 current = current->next 1477 ) { 1478 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1479 continue; 1480 1481 vm_map_clip_end(map, current, end); 1482 1483 switch (behav) { 1484 case MADV_NORMAL: 1485 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1486 break; 1487 case MADV_SEQUENTIAL: 1488 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1489 break; 1490 case MADV_RANDOM: 1491 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1492 break; 1493 case MADV_NOSYNC: 1494 current->eflags |= MAP_ENTRY_NOSYNC; 1495 break; 1496 case MADV_AUTOSYNC: 1497 current->eflags &= ~MAP_ENTRY_NOSYNC; 1498 break; 1499 case MADV_NOCORE: 1500 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1501 break; 1502 case MADV_CORE: 1503 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1504 break; 1505 default: 1506 break; 1507 } 1508 vm_map_simplify_entry(map, current); 1509 } 1510 vm_map_unlock(map); 1511 } else { 1512 vm_pindex_t pindex; 1513 int count; 1514 1515 /* 1516 * madvise behaviors that are implemented in the underlying 1517 * vm_object. 1518 * 1519 * Since we don't clip the vm_map_entry, we have to clip 1520 * the vm_object pindex and count. 1521 */ 1522 for (current = entry; 1523 (current != &map->header) && (current->start < end); 1524 current = current->next 1525 ) { 1526 vm_offset_t useStart; 1527 1528 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 1529 continue; 1530 1531 pindex = OFF_TO_IDX(current->offset); 1532 count = atop(current->end - current->start); 1533 useStart = current->start; 1534 1535 if (current->start < start) { 1536 pindex += atop(start - current->start); 1537 count -= atop(start - current->start); 1538 useStart = start; 1539 } 1540 if (current->end > end) 1541 count -= atop(current->end - end); 1542 1543 if (count <= 0) 1544 continue; 1545 1546 vm_object_madvise(current->object.vm_object, 1547 pindex, count, behav); 1548 if (behav == MADV_WILLNEED) { 1549 vm_map_pmap_enter(map, 1550 useStart, 1551 current->protection, 1552 current->object.vm_object, 1553 pindex, 1554 (count << PAGE_SHIFT), 1555 MAP_PREFAULT_MADVISE 1556 ); 1557 } 1558 } 1559 vm_map_unlock_read(map); 1560 } 1561 return (0); 1562 } 1563 1564 1565 /* 1566 * vm_map_inherit: 1567 * 1568 * Sets the inheritance of the specified address 1569 * range in the target map. Inheritance 1570 * affects how the map will be shared with 1571 * child maps at the time of vm_map_fork. 
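 *
 * A hedged example of the usual path (minherit(2) is the caller; the
 * expression shown is conventional, not taken from this file):
 *
 *        (void) vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
 *            VM_INHERIT_SHARE);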
1572 */ 1573 int 1574 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1575 vm_inherit_t new_inheritance) 1576 { 1577 vm_map_entry_t entry; 1578 vm_map_entry_t temp_entry; 1579 1580 switch (new_inheritance) { 1581 case VM_INHERIT_NONE: 1582 case VM_INHERIT_COPY: 1583 case VM_INHERIT_SHARE: 1584 break; 1585 default: 1586 return (KERN_INVALID_ARGUMENT); 1587 } 1588 vm_map_lock(map); 1589 VM_MAP_RANGE_CHECK(map, start, end); 1590 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1591 entry = temp_entry; 1592 vm_map_clip_start(map, entry, start); 1593 } else 1594 entry = temp_entry->next; 1595 while ((entry != &map->header) && (entry->start < end)) { 1596 vm_map_clip_end(map, entry, end); 1597 entry->inheritance = new_inheritance; 1598 vm_map_simplify_entry(map, entry); 1599 entry = entry->next; 1600 } 1601 vm_map_unlock(map); 1602 return (KERN_SUCCESS); 1603 } 1604 1605 /* 1606 * vm_map_unwire: 1607 * 1608 * Implements both kernel and user unwiring. 1609 */ 1610 int 1611 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1612 int flags) 1613 { 1614 vm_map_entry_t entry, first_entry, tmp_entry; 1615 vm_offset_t saved_start; 1616 unsigned int last_timestamp; 1617 int rv; 1618 boolean_t need_wakeup, result, user_unwire; 1619 1620 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 1621 vm_map_lock(map); 1622 VM_MAP_RANGE_CHECK(map, start, end); 1623 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1624 if (flags & VM_MAP_WIRE_HOLESOK) 1625 first_entry = first_entry->next; 1626 else { 1627 vm_map_unlock(map); 1628 return (KERN_INVALID_ADDRESS); 1629 } 1630 } 1631 last_timestamp = map->timestamp; 1632 entry = first_entry; 1633 while (entry != &map->header && entry->start < end) { 1634 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1635 /* 1636 * We have not yet clipped the entry. 1637 */ 1638 saved_start = (start >= entry->start) ? start : 1639 entry->start; 1640 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1641 if (vm_map_unlock_and_wait(map, user_unwire)) { 1642 /* 1643 * Allow interruption of user unwiring? 1644 */ 1645 } 1646 vm_map_lock(map); 1647 if (last_timestamp+1 != map->timestamp) { 1648 /* 1649 * Look again for the entry because the map was 1650 * modified while it was unlocked. 1651 * Specifically, the entry may have been 1652 * clipped, merged, or deleted. 1653 */ 1654 if (!vm_map_lookup_entry(map, saved_start, 1655 &tmp_entry)) { 1656 if (flags & VM_MAP_WIRE_HOLESOK) 1657 tmp_entry = tmp_entry->next; 1658 else { 1659 if (saved_start == start) { 1660 /* 1661 * First_entry has been deleted. 1662 */ 1663 vm_map_unlock(map); 1664 return (KERN_INVALID_ADDRESS); 1665 } 1666 end = saved_start; 1667 rv = KERN_INVALID_ADDRESS; 1668 goto done; 1669 } 1670 } 1671 if (entry == first_entry) 1672 first_entry = tmp_entry; 1673 else 1674 first_entry = NULL; 1675 entry = tmp_entry; 1676 } 1677 last_timestamp = map->timestamp; 1678 continue; 1679 } 1680 vm_map_clip_start(map, entry, start); 1681 vm_map_clip_end(map, entry, end); 1682 /* 1683 * Mark the entry in case the map lock is released. (See 1684 * above.) 1685 */ 1686 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1687 /* 1688 * Check the map for holes in the specified region. 1689 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 
1690 */ 1691 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 1692 (entry->end < end && (entry->next == &map->header || 1693 entry->next->start > entry->end))) { 1694 end = entry->end; 1695 rv = KERN_INVALID_ADDRESS; 1696 goto done; 1697 } 1698 /* 1699 * If system unwiring, require that the entry is system wired. 1700 */ 1701 if (!user_unwire && entry->wired_count < ((entry->eflags & 1702 MAP_ENTRY_USER_WIRED) ? 2 : 1)) { 1703 end = entry->end; 1704 rv = KERN_INVALID_ARGUMENT; 1705 goto done; 1706 } 1707 entry = entry->next; 1708 } 1709 rv = KERN_SUCCESS; 1710 done: 1711 need_wakeup = FALSE; 1712 if (first_entry == NULL) { 1713 result = vm_map_lookup_entry(map, start, &first_entry); 1714 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 1715 first_entry = first_entry->next; 1716 else 1717 KASSERT(result, ("vm_map_unwire: lookup failed")); 1718 } 1719 entry = first_entry; 1720 while (entry != &map->header && entry->start < end) { 1721 if (rv == KERN_SUCCESS && (!user_unwire || 1722 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 1723 if (user_unwire) 1724 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 1725 entry->wired_count--; 1726 if (entry->wired_count == 0) { 1727 /* 1728 * Retain the map lock. 1729 */ 1730 vm_fault_unwire(map, entry->start, entry->end, 1731 entry->object.vm_object != NULL && 1732 entry->object.vm_object->type == OBJT_DEVICE); 1733 } 1734 } 1735 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1736 ("vm_map_unwire: in-transition flag missing")); 1737 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1738 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1739 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1740 need_wakeup = TRUE; 1741 } 1742 vm_map_simplify_entry(map, entry); 1743 entry = entry->next; 1744 } 1745 vm_map_unlock(map); 1746 if (need_wakeup) 1747 vm_map_wakeup(map); 1748 return (rv); 1749 } 1750 1751 /* 1752 * vm_map_wire: 1753 * 1754 * Implements both kernel and user wiring. 1755 */ 1756 int 1757 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, 1758 int flags) 1759 { 1760 vm_map_entry_t entry, first_entry, tmp_entry; 1761 vm_offset_t saved_end, saved_start; 1762 unsigned int last_timestamp; 1763 int rv; 1764 boolean_t fictitious, need_wakeup, result, user_wire; 1765 1766 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; 1767 vm_map_lock(map); 1768 VM_MAP_RANGE_CHECK(map, start, end); 1769 if (!vm_map_lookup_entry(map, start, &first_entry)) { 1770 if (flags & VM_MAP_WIRE_HOLESOK) 1771 first_entry = first_entry->next; 1772 else { 1773 vm_map_unlock(map); 1774 return (KERN_INVALID_ADDRESS); 1775 } 1776 } 1777 last_timestamp = map->timestamp; 1778 entry = first_entry; 1779 while (entry != &map->header && entry->start < end) { 1780 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1781 /* 1782 * We have not yet clipped the entry. 1783 */ 1784 saved_start = (start >= entry->start) ? start : 1785 entry->start; 1786 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1787 if (vm_map_unlock_and_wait(map, user_wire)) { 1788 /* 1789 * Allow interruption of user wiring? 1790 */ 1791 } 1792 vm_map_lock(map); 1793 if (last_timestamp + 1 != map->timestamp) { 1794 /* 1795 * Look again for the entry because the map was 1796 * modified while it was unlocked. 1797 * Specifically, the entry may have been 1798 * clipped, merged, or deleted. 1799 */ 1800 if (!vm_map_lookup_entry(map, saved_start, 1801 &tmp_entry)) { 1802 if (flags & VM_MAP_WIRE_HOLESOK) 1803 tmp_entry = tmp_entry->next; 1804 else { 1805 if (saved_start == start) { 1806 /* 1807 * first_entry has been deleted. 
1808 */ 1809 vm_map_unlock(map); 1810 return (KERN_INVALID_ADDRESS); 1811 } 1812 end = saved_start; 1813 rv = KERN_INVALID_ADDRESS; 1814 goto done; 1815 } 1816 } 1817 if (entry == first_entry) 1818 first_entry = tmp_entry; 1819 else 1820 first_entry = NULL; 1821 entry = tmp_entry; 1822 } 1823 last_timestamp = map->timestamp; 1824 continue; 1825 } 1826 vm_map_clip_start(map, entry, start); 1827 vm_map_clip_end(map, entry, end); 1828 /* 1829 * Mark the entry in case the map lock is released. (See 1830 * above.) 1831 */ 1832 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1833 /* 1834 * 1835 */ 1836 if (entry->wired_count == 0) { 1837 entry->wired_count++; 1838 saved_start = entry->start; 1839 saved_end = entry->end; 1840 fictitious = entry->object.vm_object != NULL && 1841 entry->object.vm_object->type == OBJT_DEVICE; 1842 /* 1843 * Release the map lock, relying on the in-transition 1844 * mark. 1845 */ 1846 vm_map_unlock(map); 1847 rv = vm_fault_wire(map, saved_start, saved_end, 1848 user_wire, fictitious); 1849 vm_map_lock(map); 1850 if (last_timestamp + 1 != map->timestamp) { 1851 /* 1852 * Look again for the entry because the map was 1853 * modified while it was unlocked. The entry 1854 * may have been clipped, but NOT merged or 1855 * deleted. 1856 */ 1857 result = vm_map_lookup_entry(map, saved_start, 1858 &tmp_entry); 1859 KASSERT(result, ("vm_map_wire: lookup failed")); 1860 if (entry == first_entry) 1861 first_entry = tmp_entry; 1862 else 1863 first_entry = NULL; 1864 entry = tmp_entry; 1865 while (entry->end < saved_end) { 1866 if (rv != KERN_SUCCESS) { 1867 KASSERT(entry->wired_count == 1, 1868 ("vm_map_wire: bad count")); 1869 entry->wired_count = -1; 1870 } 1871 entry = entry->next; 1872 } 1873 } 1874 last_timestamp = map->timestamp; 1875 if (rv != KERN_SUCCESS) { 1876 KASSERT(entry->wired_count == 1, 1877 ("vm_map_wire: bad count")); 1878 /* 1879 * Assign an out-of-range value to represent 1880 * the failure to wire this entry. 1881 */ 1882 entry->wired_count = -1; 1883 end = entry->end; 1884 goto done; 1885 } 1886 } else if (!user_wire || 1887 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 1888 entry->wired_count++; 1889 } 1890 /* 1891 * Check the map for holes in the specified region. 1892 * If VM_MAP_WIRE_HOLESOK was specified, skip this check. 1893 */ 1894 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && 1895 (entry->end < end && (entry->next == &map->header || 1896 entry->next->start > entry->end))) { 1897 end = entry->end; 1898 rv = KERN_INVALID_ADDRESS; 1899 goto done; 1900 } 1901 entry = entry->next; 1902 } 1903 rv = KERN_SUCCESS; 1904 done: 1905 need_wakeup = FALSE; 1906 if (first_entry == NULL) { 1907 result = vm_map_lookup_entry(map, start, &first_entry); 1908 if (!result && (flags & VM_MAP_WIRE_HOLESOK)) 1909 first_entry = first_entry->next; 1910 else 1911 KASSERT(result, ("vm_map_wire: lookup failed")); 1912 } 1913 entry = first_entry; 1914 while (entry != &map->header && entry->start < end) { 1915 if (rv == KERN_SUCCESS) { 1916 if (user_wire) 1917 entry->eflags |= MAP_ENTRY_USER_WIRED; 1918 } else if (entry->wired_count == -1) { 1919 /* 1920 * Wiring failed on this entry. Thus, unwiring is 1921 * unnecessary. 1922 */ 1923 entry->wired_count = 0; 1924 } else { 1925 if (!user_wire || 1926 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) 1927 entry->wired_count--; 1928 if (entry->wired_count == 0) { 1929 /* 1930 * Retain the map lock. 
1931 */ 1932 vm_fault_unwire(map, entry->start, entry->end, 1933 entry->object.vm_object != NULL && 1934 entry->object.vm_object->type == OBJT_DEVICE); 1935 } 1936 } 1937 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1938 ("vm_map_wire: in-transition flag missing")); 1939 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1940 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1941 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1942 need_wakeup = TRUE; 1943 } 1944 vm_map_simplify_entry(map, entry); 1945 entry = entry->next; 1946 } 1947 vm_map_unlock(map); 1948 if (need_wakeup) 1949 vm_map_wakeup(map); 1950 return (rv); 1951 } 1952 1953 /* 1954 * vm_map_sync 1955 * 1956 * Push any dirty cached pages in the address range to their pager. 1957 * If syncio is TRUE, dirty pages are written synchronously. 1958 * If invalidate is TRUE, any cached pages are freed as well. 1959 * 1960 * If the size of the region from start to end is zero, we are 1961 * supposed to flush all modified pages within the region containing 1962 * start. Unfortunately, a region can be split or coalesced with 1963 * neighboring regions, making it difficult to determine what the 1964 * original region was. Therefore, we approximate this requirement by 1965 * flushing the current region containing start. 1966 * 1967 * Returns an error if any part of the specified range is not mapped. 1968 */ 1969 int 1970 vm_map_sync( 1971 vm_map_t map, 1972 vm_offset_t start, 1973 vm_offset_t end, 1974 boolean_t syncio, 1975 boolean_t invalidate) 1976 { 1977 vm_map_entry_t current; 1978 vm_map_entry_t entry; 1979 vm_size_t size; 1980 vm_object_t object; 1981 vm_ooffset_t offset; 1982 1983 vm_map_lock_read(map); 1984 VM_MAP_RANGE_CHECK(map, start, end); 1985 if (!vm_map_lookup_entry(map, start, &entry)) { 1986 vm_map_unlock_read(map); 1987 return (KERN_INVALID_ADDRESS); 1988 } else if (start == end) { 1989 start = entry->start; 1990 end = entry->end; 1991 } 1992 /* 1993 * Make a first pass to check for user-wired memory and holes. 1994 */ 1995 for (current = entry; current->start < end; current = current->next) { 1996 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 1997 vm_map_unlock_read(map); 1998 return (KERN_INVALID_ARGUMENT); 1999 } 2000 if (end > current->end && 2001 (current->next == &map->header || 2002 current->end != current->next->start)) { 2003 vm_map_unlock_read(map); 2004 return (KERN_INVALID_ADDRESS); 2005 } 2006 } 2007 2008 if (invalidate) { 2009 mtx_lock(&Giant); 2010 pmap_remove(map->pmap, start, end); 2011 mtx_unlock(&Giant); 2012 } 2013 /* 2014 * Make a second pass, cleaning/uncaching pages from the indicated 2015 * objects as we go. 2016 */ 2017 for (current = entry; current->start < end; current = current->next) { 2018 offset = current->offset + (start - current->start); 2019 size = (end <= current->end ? 
end : current->end) - start; 2020 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2021 vm_map_t smap; 2022 vm_map_entry_t tentry; 2023 vm_size_t tsize; 2024 2025 smap = current->object.sub_map; 2026 vm_map_lock_read(smap); 2027 (void) vm_map_lookup_entry(smap, offset, &tentry); 2028 tsize = tentry->end - offset; 2029 if (tsize < size) 2030 size = tsize; 2031 object = tentry->object.vm_object; 2032 offset = tentry->offset + (offset - tentry->start); 2033 vm_map_unlock_read(smap); 2034 } else { 2035 object = current->object.vm_object; 2036 } 2037 vm_object_sync(object, offset, size, syncio, invalidate); 2038 start += size; 2039 } 2040 2041 vm_map_unlock_read(map); 2042 return (KERN_SUCCESS); 2043 } 2044 2045 /* 2046 * vm_map_entry_unwire: [ internal use only ] 2047 * 2048 * Make the region specified by this entry pageable. 2049 * 2050 * The map in question should be locked. 2051 * [This is the reason for this routine's existence.] 2052 */ 2053 static void 2054 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2055 { 2056 vm_fault_unwire(map, entry->start, entry->end, 2057 entry->object.vm_object != NULL && 2058 entry->object.vm_object->type == OBJT_DEVICE); 2059 entry->wired_count = 0; 2060 } 2061 2062 /* 2063 * vm_map_entry_delete: [ internal use only ] 2064 * 2065 * Deallocate the given entry from the target map. 2066 */ 2067 static void 2068 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2069 { 2070 vm_object_t object; 2071 vm_pindex_t offidxstart, offidxend, count; 2072 2073 vm_map_entry_unlink(map, entry); 2074 map->size -= entry->end - entry->start; 2075 2076 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2077 (object = entry->object.vm_object) != NULL) { 2078 count = OFF_TO_IDX(entry->end - entry->start); 2079 offidxstart = OFF_TO_IDX(entry->offset); 2080 offidxend = offidxstart + count; 2081 VM_OBJECT_LOCK(object); 2082 if (object->ref_count != 1 && 2083 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2084 object == kernel_object || object == kmem_object) && 2085 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2086 vm_object_collapse(object); 2087 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2088 if (object->type == OBJT_SWAP) 2089 swap_pager_freespace(object, offidxstart, count); 2090 if (offidxend >= object->size && 2091 offidxstart < object->size) 2092 object->size = offidxstart; 2093 } 2094 VM_OBJECT_UNLOCK(object); 2095 vm_object_deallocate(object); 2096 } 2097 2098 vm_map_entry_dispose(map, entry); 2099 } 2100 2101 /* 2102 * vm_map_delete: [ internal use only ] 2103 * 2104 * Deallocates the given address range from the target 2105 * map. 2106 */ 2107 int 2108 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2109 { 2110 vm_map_entry_t entry; 2111 vm_map_entry_t first_entry; 2112 2113 /* 2114 * Find the start of the region, and clip it 2115 */ 2116 if (!vm_map_lookup_entry(map, start, &first_entry)) 2117 entry = first_entry->next; 2118 else { 2119 entry = first_entry; 2120 vm_map_clip_start(map, entry, start); 2121 } 2122 2123 /* 2124 * Save the free space hint 2125 */ 2126 if (entry == &map->header) { 2127 map->first_free = &map->header; 2128 } else if (map->first_free->start >= start) { 2129 map->first_free = entry->prev; 2130 } 2131 2132 /* 2133 * Step through all entries in this region 2134 */ 2135 while ((entry != &map->header) && (entry->start < end)) { 2136 vm_map_entry_t next; 2137 2138 /* 2139 * Wait for wiring or unwiring of an entry to complete. 
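 * An entry marked MAP_ENTRY_IN_TRANSITION belongs to a concurrent
 * wiring or unwiring operation that may have temporarily dropped
 * the map lock, so it is not clipped or deleted until that
 * operation finishes and clears the flag.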
2140 */ 2141 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) { 2142 unsigned int last_timestamp; 2143 vm_offset_t saved_start; 2144 vm_map_entry_t tmp_entry; 2145 2146 saved_start = entry->start; 2147 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2148 last_timestamp = map->timestamp; 2149 (void) vm_map_unlock_and_wait(map, FALSE); 2150 vm_map_lock(map); 2151 if (last_timestamp + 1 != map->timestamp) { 2152 /* 2153 * Look again for the entry because the map was 2154 * modified while it was unlocked. 2155 * Specifically, the entry may have been 2156 * clipped, merged, or deleted. 2157 */ 2158 if (!vm_map_lookup_entry(map, saved_start, 2159 &tmp_entry)) 2160 entry = tmp_entry->next; 2161 else { 2162 entry = tmp_entry; 2163 vm_map_clip_start(map, entry, 2164 saved_start); 2165 } 2166 } 2167 continue; 2168 } 2169 vm_map_clip_end(map, entry, end); 2170 2171 next = entry->next; 2172 2173 /* 2174 * Unwire before removing addresses from the pmap; otherwise, 2175 * unwiring will put the entries back in the pmap. 2176 */ 2177 if (entry->wired_count != 0) { 2178 vm_map_entry_unwire(map, entry); 2179 } 2180 2181 if (!map->system_map) 2182 mtx_lock(&Giant); 2183 pmap_remove(map->pmap, entry->start, entry->end); 2184 if (!map->system_map) 2185 mtx_unlock(&Giant); 2186 2187 /* 2188 * Delete the entry (which may delete the object) only after 2189 * removing all pmap entries pointing to its pages. 2190 * (Otherwise, its page frames may be reallocated, and any 2191 * modify bits will be set in the wrong object!) 2192 */ 2193 vm_map_entry_delete(map, entry); 2194 entry = next; 2195 } 2196 return (KERN_SUCCESS); 2197 } 2198 2199 /* 2200 * vm_map_remove: 2201 * 2202 * Remove the given address range from the target map. 2203 * This is the exported form of vm_map_delete. 2204 */ 2205 int 2206 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2207 { 2208 int result, s = 0; 2209 2210 if (map == kmem_map) 2211 s = splvm(); 2212 2213 vm_map_lock(map); 2214 VM_MAP_RANGE_CHECK(map, start, end); 2215 result = vm_map_delete(map, start, end); 2216 vm_map_unlock(map); 2217 2218 if (map == kmem_map) 2219 splx(s); 2220 2221 return (result); 2222 } 2223 2224 /* 2225 * vm_map_check_protection: 2226 * 2227 * Assert that the target map allows the specified privilege on the 2228 * entire address region given. The entire region must be allocated. 2229 * 2230 * WARNING! This code does not and should not check whether the 2231 * contents of the region is accessible. For example a smaller file 2232 * might be mapped into a larger address space. 2233 * 2234 * NOTE! This code is also called by munmap(). 2235 * 2236 * The map must be locked. A read lock is sufficient. 2237 */ 2238 boolean_t 2239 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2240 vm_prot_t protection) 2241 { 2242 vm_map_entry_t entry; 2243 vm_map_entry_t tmp_entry; 2244 2245 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2246 return (FALSE); 2247 entry = tmp_entry; 2248 2249 while (start < end) { 2250 if (entry == &map->header) 2251 return (FALSE); 2252 /* 2253 * No holes allowed! 2254 */ 2255 if (start < entry->start) 2256 return (FALSE); 2257 /* 2258 * Check protection associated with entry. 2259 */ 2260 if ((entry->protection & protection) != protection) 2261 return (FALSE); 2262 /* go to next entry */ 2263 start = entry->end; 2264 entry = entry->next; 2265 } 2266 return (TRUE); 2267 } 2268 2269 /* 2270 * vm_map_copy_entry: 2271 * 2272 * Copies the contents of the source entry to the destination 2273 * entry. 
The entries *must* be aligned properly. 2274 */ 2275 static void 2276 vm_map_copy_entry( 2277 vm_map_t src_map, 2278 vm_map_t dst_map, 2279 vm_map_entry_t src_entry, 2280 vm_map_entry_t dst_entry) 2281 { 2282 vm_object_t src_object; 2283 2284 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2285 return; 2286 2287 if (src_entry->wired_count == 0) { 2288 2289 /* 2290 * If the source entry is marked needs_copy, it is already 2291 * write-protected. 2292 */ 2293 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2294 pmap_protect(src_map->pmap, 2295 src_entry->start, 2296 src_entry->end, 2297 src_entry->protection & ~VM_PROT_WRITE); 2298 } 2299 2300 /* 2301 * Make a copy of the object. 2302 */ 2303 if ((src_object = src_entry->object.vm_object) != NULL) { 2304 VM_OBJECT_LOCK(src_object); 2305 if ((src_object->handle == NULL) && 2306 (src_object->type == OBJT_DEFAULT || 2307 src_object->type == OBJT_SWAP)) { 2308 vm_object_collapse(src_object); 2309 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2310 vm_object_split(src_entry); 2311 src_object = src_entry->object.vm_object; 2312 } 2313 } 2314 vm_object_reference_locked(src_object); 2315 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2316 VM_OBJECT_UNLOCK(src_object); 2317 dst_entry->object.vm_object = src_object; 2318 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2319 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2320 dst_entry->offset = src_entry->offset; 2321 } else { 2322 dst_entry->object.vm_object = NULL; 2323 dst_entry->offset = 0; 2324 } 2325 2326 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2327 dst_entry->end - dst_entry->start, src_entry->start); 2328 } else { 2329 /* 2330 * Of course, wired down pages can't be set copy-on-write. 2331 * Cause wired pages to be copied into the new map by 2332 * simulating faults (the new pages are pageable) 2333 */ 2334 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2335 } 2336 } 2337 2338 /* 2339 * vmspace_map_entry_forked: 2340 * Update the newly-forked vmspace each time a map entry is inherited 2341 * or copied. The values for vm_dsize and vm_tsize are approximate 2342 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 2343 */ 2344 static void 2345 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 2346 vm_map_entry_t entry) 2347 { 2348 vm_size_t entrysize; 2349 vm_offset_t newend; 2350 2351 entrysize = entry->end - entry->start; 2352 vm2->vm_map.size += entrysize; 2353 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 2354 vm2->vm_ssize += btoc(entrysize); 2355 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 2356 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 2357 newend = MIN(entry->end, 2358 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 2359 vm2->vm_dsize += btoc(newend - entry->start); 2360 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 2361 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 2362 newend = MIN(entry->end, 2363 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 2364 vm2->vm_tsize += btoc(newend - entry->start); 2365 } 2366 } 2367 2368 /* 2369 * vmspace_fork: 2370 * Create a new process vmspace structure and vm_map 2371 * based on those of an existing process. The new map 2372 * is based on the old map, according to the inheritance 2373 * values on the regions in that map. 2374 * 2375 * XXX It might be worth coalescing the entries added to the new vmspace. 
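 *
 * Entries marked VM_INHERIT_NONE are skipped, VM_INHERIT_SHARE
 * entries end up referencing the same backing object from both
 * maps, and VM_INHERIT_COPY entries are set up copy-on-write
 * through vm_map_copy_entry().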
2376 * 2377 * The source map must not be locked. 2378 */ 2379 struct vmspace * 2380 vmspace_fork(struct vmspace *vm1) 2381 { 2382 struct vmspace *vm2; 2383 vm_map_t old_map = &vm1->vm_map; 2384 vm_map_t new_map; 2385 vm_map_entry_t old_entry; 2386 vm_map_entry_t new_entry; 2387 vm_object_t object; 2388 2389 GIANT_REQUIRED; 2390 2391 vm_map_lock(old_map); 2392 old_map->infork = 1; 2393 2394 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2395 vm2->vm_taddr = vm1->vm_taddr; 2396 vm2->vm_daddr = vm1->vm_daddr; 2397 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 2398 new_map = &vm2->vm_map; /* XXX */ 2399 new_map->timestamp = 1; 2400 2401 /* Do not inherit the MAP_WIREFUTURE property. */ 2402 if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) 2403 new_map->flags &= ~MAP_WIREFUTURE; 2404 2405 old_entry = old_map->header.next; 2406 2407 while (old_entry != &old_map->header) { 2408 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2409 panic("vm_map_fork: encountered a submap"); 2410 2411 switch (old_entry->inheritance) { 2412 case VM_INHERIT_NONE: 2413 break; 2414 2415 case VM_INHERIT_SHARE: 2416 /* 2417 * Clone the entry, creating the shared object if necessary. 2418 */ 2419 object = old_entry->object.vm_object; 2420 if (object == NULL) { 2421 object = vm_object_allocate(OBJT_DEFAULT, 2422 atop(old_entry->end - old_entry->start)); 2423 old_entry->object.vm_object = object; 2424 old_entry->offset = (vm_offset_t) 0; 2425 } 2426 2427 /* 2428 * Add the reference before calling vm_object_shadow 2429 * to insure that a shadow object is created. 2430 */ 2431 vm_object_reference(object); 2432 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2433 vm_object_shadow(&old_entry->object.vm_object, 2434 &old_entry->offset, 2435 atop(old_entry->end - old_entry->start)); 2436 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2437 /* Transfer the second reference too. */ 2438 vm_object_reference( 2439 old_entry->object.vm_object); 2440 vm_object_deallocate(object); 2441 object = old_entry->object.vm_object; 2442 } 2443 VM_OBJECT_LOCK(object); 2444 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2445 VM_OBJECT_UNLOCK(object); 2446 2447 /* 2448 * Clone the entry, referencing the shared object. 2449 */ 2450 new_entry = vm_map_entry_create(new_map); 2451 *new_entry = *old_entry; 2452 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2453 new_entry->wired_count = 0; 2454 2455 /* 2456 * Insert the entry into the new map -- we know we're 2457 * inserting at the end of the new map. 2458 */ 2459 vm_map_entry_link(new_map, new_map->header.prev, 2460 new_entry); 2461 vmspace_map_entry_forked(vm1, vm2, new_entry); 2462 2463 /* 2464 * Update the physical map 2465 */ 2466 pmap_copy(new_map->pmap, old_map->pmap, 2467 new_entry->start, 2468 (old_entry->end - old_entry->start), 2469 old_entry->start); 2470 break; 2471 2472 case VM_INHERIT_COPY: 2473 /* 2474 * Clone the entry and link into the map. 
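 * The new entry starts without an object reference;
 * vm_map_copy_entry() below supplies one and marks both entries
 * copy-on-write when appropriate.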
2475 */ 2476 new_entry = vm_map_entry_create(new_map); 2477 *new_entry = *old_entry; 2478 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2479 new_entry->wired_count = 0; 2480 new_entry->object.vm_object = NULL; 2481 vm_map_entry_link(new_map, new_map->header.prev, 2482 new_entry); 2483 vmspace_map_entry_forked(vm1, vm2, new_entry); 2484 vm_map_copy_entry(old_map, new_map, old_entry, 2485 new_entry); 2486 break; 2487 } 2488 old_entry = old_entry->next; 2489 } 2490 2491 old_map->infork = 0; 2492 vm_map_unlock(old_map); 2493 2494 return (vm2); 2495 } 2496 2497 int 2498 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2499 vm_prot_t prot, vm_prot_t max, int cow) 2500 { 2501 vm_map_entry_t new_entry, prev_entry; 2502 vm_offset_t bot, top; 2503 vm_size_t init_ssize; 2504 int orient, rv; 2505 rlim_t vmemlim; 2506 2507 /* 2508 * The stack orientation is piggybacked with the cow argument. 2509 * Extract it into orient and mask the cow argument so that we 2510 * don't pass it around further. 2511 * NOTE: We explicitly allow bi-directional stacks. 2512 */ 2513 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); 2514 cow &= ~orient; 2515 KASSERT(orient != 0, ("No stack grow direction")); 2516 2517 if (addrbos < vm_map_min(map) || addrbos > map->max_offset) 2518 return (KERN_NO_SPACE); 2519 2520 init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz; 2521 2522 PROC_LOCK(curthread->td_proc); 2523 vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM); 2524 PROC_UNLOCK(curthread->td_proc); 2525 2526 vm_map_lock(map); 2527 2528 /* If addr is already mapped, no go */ 2529 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2530 vm_map_unlock(map); 2531 return (KERN_NO_SPACE); 2532 } 2533 2534 /* If we would blow our VMEM resource limit, no go */ 2535 if (map->size + init_ssize > vmemlim) { 2536 vm_map_unlock(map); 2537 return (KERN_NO_SPACE); 2538 } 2539 2540 /* 2541 * If we can't accomodate max_ssize in the current mapping, no go. 2542 * However, we need to be aware that subsequent user mappings might 2543 * map into the space we have reserved for stack, and currently this 2544 * space is not protected. 2545 * 2546 * Hopefully we will at least detect this condition when we try to 2547 * grow the stack. 2548 */ 2549 if ((prev_entry->next != &map->header) && 2550 (prev_entry->next->start < addrbos + max_ssize)) { 2551 vm_map_unlock(map); 2552 return (KERN_NO_SPACE); 2553 } 2554 2555 /* 2556 * We initially map a stack of only init_ssize. We will grow as 2557 * needed later. Depending on the orientation of the stack (i.e. 2558 * the grow direction) we either map at the top of the range, the 2559 * bottom of the range or in the middle. 2560 * 2561 * Note: we would normally expect prot and max to be VM_PROT_ALL, 2562 * and cow to be 0. Possibly we should eliminate these as input 2563 * parameters, and just pass these values here in the insert call. 2564 */ 2565 if (orient == MAP_STACK_GROWS_DOWN) 2566 bot = addrbos + max_ssize - init_ssize; 2567 else if (orient == MAP_STACK_GROWS_UP) 2568 bot = addrbos; 2569 else 2570 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 2571 top = bot + init_ssize; 2572 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 2573 2574 /* Now set the avail_ssize amount. 
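 * After the insert the new stack entry is prev_entry->next;
 * avail_ssize records how much further it may grow, and the
 * grow-direction flags reflect the requested orientation.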
*/ 2575 if (rv == KERN_SUCCESS) { 2576 if (prev_entry != &map->header) 2577 vm_map_clip_end(map, prev_entry, bot); 2578 new_entry = prev_entry->next; 2579 if (new_entry->end != top || new_entry->start != bot) 2580 panic("Bad entry start/end for new stack entry"); 2581 2582 new_entry->avail_ssize = max_ssize - init_ssize; 2583 if (orient & MAP_STACK_GROWS_DOWN) 2584 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2585 if (orient & MAP_STACK_GROWS_UP) 2586 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 2587 } 2588 2589 vm_map_unlock(map); 2590 return (rv); 2591 } 2592 2593 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2594 * desired address is already mapped, or if we successfully grow 2595 * the stack. Also returns KERN_SUCCESS if addr is outside the 2596 * stack range (this is strange, but preserves compatibility with 2597 * the grow function in vm_machdep.c). 2598 */ 2599 int 2600 vm_map_growstack(struct proc *p, vm_offset_t addr) 2601 { 2602 vm_map_entry_t next_entry, prev_entry; 2603 vm_map_entry_t new_entry, stack_entry; 2604 struct vmspace *vm = p->p_vmspace; 2605 vm_map_t map = &vm->vm_map; 2606 vm_offset_t end; 2607 size_t grow_amount, max_grow; 2608 rlim_t stacklim, vmemlim; 2609 int is_procstack, rv; 2610 2611 Retry: 2612 PROC_LOCK(p); 2613 stacklim = lim_cur(p, RLIMIT_STACK); 2614 vmemlim = lim_cur(p, RLIMIT_VMEM); 2615 PROC_UNLOCK(p); 2616 2617 vm_map_lock_read(map); 2618 2619 /* If addr is already in the entry range, no need to grow.*/ 2620 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2621 vm_map_unlock_read(map); 2622 return (KERN_SUCCESS); 2623 } 2624 2625 next_entry = prev_entry->next; 2626 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 2627 /* 2628 * This entry does not grow upwards. Since the address lies 2629 * beyond this entry, the next entry (if one exists) has to 2630 * be a downward growable entry. The entry list header is 2631 * never a growable entry, so it suffices to check the flags. 2632 */ 2633 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 2634 vm_map_unlock_read(map); 2635 return (KERN_SUCCESS); 2636 } 2637 stack_entry = next_entry; 2638 } else { 2639 /* 2640 * This entry grows upward. If the next entry does not at 2641 * least grow downwards, this is the entry we need to grow. 2642 * otherwise we have two possible choices and we have to 2643 * select one. 2644 */ 2645 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 2646 /* 2647 * We have two choices; grow the entry closest to 2648 * the address to minimize the amount of growth. 2649 */ 2650 if (addr - prev_entry->end <= next_entry->start - addr) 2651 stack_entry = prev_entry; 2652 else 2653 stack_entry = next_entry; 2654 } else 2655 stack_entry = prev_entry; 2656 } 2657 2658 if (stack_entry == next_entry) { 2659 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 2660 KASSERT(addr < stack_entry->start, ("foo")); 2661 end = (prev_entry != &map->header) ? prev_entry->end : 2662 stack_entry->start - stack_entry->avail_ssize; 2663 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 2664 max_grow = stack_entry->start - end; 2665 } else { 2666 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 2667 KASSERT(addr >= stack_entry->end, ("foo")); 2668 end = (next_entry != &map->header) ? 
next_entry->start : 2669 stack_entry->end + stack_entry->avail_ssize; 2670 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 2671 max_grow = end - stack_entry->end; 2672 } 2673 2674 if (grow_amount > stack_entry->avail_ssize) { 2675 vm_map_unlock_read(map); 2676 return (KERN_NO_SPACE); 2677 } 2678 2679 /* 2680 * If there is no longer enough space between the entries nogo, and 2681 * adjust the available space. Note: this should only happen if the 2682 * user has mapped into the stack area after the stack was created, 2683 * and is probably an error. 2684 * 2685 * This also effectively destroys any guard page the user might have 2686 * intended by limiting the stack size. 2687 */ 2688 if (grow_amount > max_grow) { 2689 if (vm_map_lock_upgrade(map)) 2690 goto Retry; 2691 2692 stack_entry->avail_ssize = max_grow; 2693 2694 vm_map_unlock(map); 2695 return (KERN_NO_SPACE); 2696 } 2697 2698 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 2699 2700 /* 2701 * If this is the main process stack, see if we're over the stack 2702 * limit. 2703 */ 2704 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 2705 vm_map_unlock_read(map); 2706 return (KERN_NO_SPACE); 2707 } 2708 2709 /* Round up the grow amount modulo SGROWSIZ */ 2710 grow_amount = roundup (grow_amount, sgrowsiz); 2711 if (grow_amount > stack_entry->avail_ssize) 2712 grow_amount = stack_entry->avail_ssize; 2713 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 2714 grow_amount = stacklim - ctob(vm->vm_ssize); 2715 } 2716 2717 /* If we would blow our VMEM resource limit, no go */ 2718 if (map->size + grow_amount > vmemlim) { 2719 vm_map_unlock_read(map); 2720 return (KERN_NO_SPACE); 2721 } 2722 2723 if (vm_map_lock_upgrade(map)) 2724 goto Retry; 2725 2726 if (stack_entry == next_entry) { 2727 /* 2728 * Growing downward. 2729 */ 2730 /* Get the preliminary new entry start value */ 2731 addr = stack_entry->start - grow_amount; 2732 2733 /* 2734 * If this puts us into the previous entry, cut back our 2735 * growth to the available space. Also, see the note above. 2736 */ 2737 if (addr < end) { 2738 stack_entry->avail_ssize = max_grow; 2739 addr = end; 2740 } 2741 2742 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2743 p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); 2744 2745 /* Adjust the available stack space by the amount we grew. */ 2746 if (rv == KERN_SUCCESS) { 2747 if (prev_entry != &map->header) 2748 vm_map_clip_end(map, prev_entry, addr); 2749 new_entry = prev_entry->next; 2750 KASSERT(new_entry == stack_entry->prev, ("foo")); 2751 KASSERT(new_entry->end == stack_entry->start, ("foo")); 2752 KASSERT(new_entry->start == addr, ("foo")); 2753 grow_amount = new_entry->end - new_entry->start; 2754 new_entry->avail_ssize = stack_entry->avail_ssize - 2755 grow_amount; 2756 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 2757 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2758 } 2759 } else { 2760 /* 2761 * Growing upward. 2762 */ 2763 addr = stack_entry->end + grow_amount; 2764 2765 /* 2766 * If this puts us into the next entry, cut back our growth 2767 * to the available space. Also, see the note above. 2768 */ 2769 if (addr > end) { 2770 stack_entry->avail_ssize = end - stack_entry->end; 2771 addr = end; 2772 } 2773 2774 grow_amount = addr - stack_entry->end; 2775 2776 /* Grow the underlying object if applicable. 
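 * If the entry has no backing object yet, or vm_object_coalesce()
 * can extend the existing one, the entry is simply enlarged in
 * place; otherwise the grow fails with KERN_FAILURE.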
*/ 2777 if (stack_entry->object.vm_object == NULL || 2778 vm_object_coalesce(stack_entry->object.vm_object, 2779 stack_entry->offset, 2780 (vm_size_t)(stack_entry->end - stack_entry->start), 2781 (vm_size_t)grow_amount)) { 2782 map->size += (addr - stack_entry->end); 2783 /* Update the current entry. */ 2784 stack_entry->end = addr; 2785 stack_entry->avail_ssize -= grow_amount; 2786 rv = KERN_SUCCESS; 2787 2788 if (next_entry != &map->header) 2789 vm_map_clip_start(map, next_entry, addr); 2790 } else 2791 rv = KERN_FAILURE; 2792 } 2793 2794 if (rv == KERN_SUCCESS && is_procstack) 2795 vm->vm_ssize += btoc(grow_amount); 2796 2797 vm_map_unlock(map); 2798 2799 /* 2800 * Heed the MAP_WIREFUTURE flag if it was set for this process. 2801 */ 2802 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 2803 vm_map_wire(map, 2804 (stack_entry == next_entry) ? addr : addr - grow_amount, 2805 (stack_entry == next_entry) ? stack_entry->start : addr, 2806 (p->p_flag & P_SYSTEM) 2807 ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 2808 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 2809 } 2810 2811 return (rv); 2812 } 2813 2814 /* 2815 * Unshare the specified VM space for exec. If other processes are 2816 * mapped to it, then create a new one. The new vmspace is null. 2817 */ 2818 void 2819 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 2820 { 2821 struct vmspace *oldvmspace = p->p_vmspace; 2822 struct vmspace *newvmspace; 2823 2824 GIANT_REQUIRED; 2825 newvmspace = vmspace_alloc(minuser, maxuser); 2826 newvmspace->vm_swrss = oldvmspace->vm_swrss; 2827 /* 2828 * This code is written like this for prototype purposes. The 2829 * goal is to avoid running down the vmspace here, but let the 2830 * other process's that are still using the vmspace to finally 2831 * run it down. Even though there is little or no chance of blocking 2832 * here, it is a good idea to keep this form for future mods. 2833 */ 2834 p->p_vmspace = newvmspace; 2835 if (p == curthread->td_proc) /* XXXKSE ? */ 2836 pmap_activate(curthread); 2837 vmspace_free(oldvmspace); 2838 } 2839 2840 /* 2841 * Unshare the specified VM space for forcing COW. This 2842 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2843 */ 2844 void 2845 vmspace_unshare(struct proc *p) 2846 { 2847 struct vmspace *oldvmspace = p->p_vmspace; 2848 struct vmspace *newvmspace; 2849 2850 GIANT_REQUIRED; 2851 if (oldvmspace->vm_refcnt == 1) 2852 return; 2853 newvmspace = vmspace_fork(oldvmspace); 2854 p->p_vmspace = newvmspace; 2855 if (p == curthread->td_proc) /* XXXKSE ? */ 2856 pmap_activate(curthread); 2857 vmspace_free(oldvmspace); 2858 } 2859 2860 /* 2861 * vm_map_lookup: 2862 * 2863 * Finds the VM object, offset, and 2864 * protection for a given virtual address in the 2865 * specified map, assuming a page fault of the 2866 * type specified. 2867 * 2868 * Leaves the map in question locked for read; return 2869 * values are guaranteed until a vm_map_lookup_done 2870 * call is performed. Note that the map argument 2871 * is in/out; the returned map must be used in 2872 * the call to vm_map_lookup_done. 2873 * 2874 * A handle (out_entry) is returned for use in 2875 * vm_map_lookup_done, to make that fast. 2876 * 2877 * If a lookup is requested with "write protection" 2878 * specified, the map may be changed to perform virtual 2879 * copying operations, although the data referenced will 2880 * remain the same. 
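 *
 * A typical caller pairs this routine with vm_map_lookup_done()
 * roughly as follows (illustrative sketch only; the argument
 * values and error handling depend on the caller, e.g. the page
 * fault handler):
 *
 *	result = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... use object and pindex while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry);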
2881 */ 2882 int 2883 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2884 vm_offset_t vaddr, 2885 vm_prot_t fault_typea, 2886 vm_map_entry_t *out_entry, /* OUT */ 2887 vm_object_t *object, /* OUT */ 2888 vm_pindex_t *pindex, /* OUT */ 2889 vm_prot_t *out_prot, /* OUT */ 2890 boolean_t *wired) /* OUT */ 2891 { 2892 vm_map_entry_t entry; 2893 vm_map_t map = *var_map; 2894 vm_prot_t prot; 2895 vm_prot_t fault_type = fault_typea; 2896 2897 RetryLookup:; 2898 /* 2899 * Lookup the faulting address. 2900 */ 2901 2902 vm_map_lock_read(map); 2903 #define RETURN(why) \ 2904 { \ 2905 vm_map_unlock_read(map); \ 2906 return (why); \ 2907 } 2908 2909 /* 2910 * If the map has an interesting hint, try it before calling full 2911 * blown lookup routine. 2912 */ 2913 entry = map->root; 2914 *out_entry = entry; 2915 if (entry == NULL || 2916 (vaddr < entry->start) || (vaddr >= entry->end)) { 2917 /* 2918 * Entry was either not a valid hint, or the vaddr was not 2919 * contained in the entry, so do a full lookup. 2920 */ 2921 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 2922 RETURN(KERN_INVALID_ADDRESS); 2923 2924 entry = *out_entry; 2925 } 2926 2927 /* 2928 * Handle submaps. 2929 */ 2930 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2931 vm_map_t old_map = map; 2932 2933 *var_map = map = entry->object.sub_map; 2934 vm_map_unlock_read(old_map); 2935 goto RetryLookup; 2936 } 2937 2938 /* 2939 * Check whether this task is allowed to have this page. 2940 * Note the special case for MAP_ENTRY_COW 2941 * pages with an override. This is to implement a forced 2942 * COW for debuggers. 2943 */ 2944 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2945 prot = entry->max_protection; 2946 else 2947 prot = entry->protection; 2948 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2949 if ((fault_type & prot) != fault_type) { 2950 RETURN(KERN_PROTECTION_FAILURE); 2951 } 2952 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2953 (entry->eflags & MAP_ENTRY_COW) && 2954 (fault_type & VM_PROT_WRITE) && 2955 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2956 RETURN(KERN_PROTECTION_FAILURE); 2957 } 2958 2959 /* 2960 * If this page is not pageable, we have to get it for all possible 2961 * accesses. 2962 */ 2963 *wired = (entry->wired_count != 0); 2964 if (*wired) 2965 prot = fault_type = entry->protection; 2966 2967 /* 2968 * If the entry was copy-on-write, we either ... 2969 */ 2970 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2971 /* 2972 * If we want to write the page, we may as well handle that 2973 * now since we've got the map locked. 2974 * 2975 * If we don't need to write the page, we just demote the 2976 * permissions allowed. 2977 */ 2978 if (fault_type & VM_PROT_WRITE) { 2979 /* 2980 * Make a new object, and place it in the object 2981 * chain. Note that no new references have appeared 2982 * -- one just moved from the map to the new 2983 * object. 2984 */ 2985 if (vm_map_lock_upgrade(map)) 2986 goto RetryLookup; 2987 2988 vm_object_shadow( 2989 &entry->object.vm_object, 2990 &entry->offset, 2991 atop(entry->end - entry->start)); 2992 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2993 2994 vm_map_lock_downgrade(map); 2995 } else { 2996 /* 2997 * We're attempting to read a copy-on-write page -- 2998 * don't allow writes. 2999 */ 3000 prot &= ~VM_PROT_WRITE; 3001 } 3002 } 3003 3004 /* 3005 * Create an object if necessary. 
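 * Anonymous entries in user maps are given a default object here so
 * that the fault code has something to page against; entries in the
 * system map are left without one.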
3006 */ 3007 if (entry->object.vm_object == NULL && 3008 !map->system_map) { 3009 if (vm_map_lock_upgrade(map)) 3010 goto RetryLookup; 3011 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 3012 atop(entry->end - entry->start)); 3013 entry->offset = 0; 3014 vm_map_lock_downgrade(map); 3015 } 3016 3017 /* 3018 * Return the object/offset from this entry. If the entry was 3019 * copy-on-write or empty, it has been fixed up. 3020 */ 3021 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3022 *object = entry->object.vm_object; 3023 3024 /* 3025 * Return whether this is the only map sharing this data. 3026 */ 3027 *out_prot = prot; 3028 return (KERN_SUCCESS); 3029 3030 #undef RETURN 3031 } 3032 3033 /* 3034 * vm_map_lookup_done: 3035 * 3036 * Releases locks acquired by a vm_map_lookup 3037 * (according to the handle returned by that lookup). 3038 */ 3039 void 3040 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 3041 { 3042 /* 3043 * Unlock the main-level map 3044 */ 3045 vm_map_unlock_read(map); 3046 } 3047 3048 #include "opt_ddb.h" 3049 #ifdef DDB 3050 #include <sys/kernel.h> 3051 3052 #include <ddb/ddb.h> 3053 3054 /* 3055 * vm_map_print: [ debug ] 3056 */ 3057 DB_SHOW_COMMAND(map, vm_map_print) 3058 { 3059 static int nlines; 3060 /* XXX convert args. */ 3061 vm_map_t map = (vm_map_t)addr; 3062 boolean_t full = have_addr; 3063 3064 vm_map_entry_t entry; 3065 3066 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3067 (void *)map, 3068 (void *)map->pmap, map->nentries, map->timestamp); 3069 nlines++; 3070 3071 if (!full && db_indent) 3072 return; 3073 3074 db_indent += 2; 3075 for (entry = map->header.next; entry != &map->header; 3076 entry = entry->next) { 3077 db_iprintf("map entry %p: start=%p, end=%p\n", 3078 (void *)entry, (void *)entry->start, (void *)entry->end); 3079 nlines++; 3080 { 3081 static char *inheritance_name[4] = 3082 {"share", "copy", "none", "donate_copy"}; 3083 3084 db_iprintf(" prot=%x/%x/%s", 3085 entry->protection, 3086 entry->max_protection, 3087 inheritance_name[(int)(unsigned char)entry->inheritance]); 3088 if (entry->wired_count != 0) 3089 db_printf(", wired"); 3090 } 3091 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3092 db_printf(", share=%p, offset=0x%jx\n", 3093 (void *)entry->object.sub_map, 3094 (uintmax_t)entry->offset); 3095 nlines++; 3096 if ((entry->prev == &map->header) || 3097 (entry->prev->object.sub_map != 3098 entry->object.sub_map)) { 3099 db_indent += 2; 3100 vm_map_print((db_expr_t)(intptr_t) 3101 entry->object.sub_map, 3102 full, 0, (char *)0); 3103 db_indent -= 2; 3104 } 3105 } else { 3106 db_printf(", object=%p, offset=0x%jx", 3107 (void *)entry->object.vm_object, 3108 (uintmax_t)entry->offset); 3109 if (entry->eflags & MAP_ENTRY_COW) 3110 db_printf(", copy (%s)", 3111 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? 
"needed" : "done"); 3112 db_printf("\n"); 3113 nlines++; 3114 3115 if ((entry->prev == &map->header) || 3116 (entry->prev->object.vm_object != 3117 entry->object.vm_object)) { 3118 db_indent += 2; 3119 vm_object_print((db_expr_t)(intptr_t) 3120 entry->object.vm_object, 3121 full, 0, (char *)0); 3122 nlines += 4; 3123 db_indent -= 2; 3124 } 3125 } 3126 } 3127 db_indent -= 2; 3128 if (db_indent == 0) 3129 nlines = 0; 3130 } 3131 3132 3133 DB_SHOW_COMMAND(procvm, procvm) 3134 { 3135 struct proc *p; 3136 3137 if (have_addr) { 3138 p = (struct proc *) addr; 3139 } else { 3140 p = curproc; 3141 } 3142 3143 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3144 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3145 (void *)vmspace_pmap(p->p_vmspace)); 3146 3147 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3148 } 3149 3150 #endif /* DDB */ 3151