/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int size, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	uma_prealloc(kmapentzone, MAX_KMAPENT);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_prealloc(mapentzone, MAX_MAPENT);
}

static void
vmspace_zfini(void *mem, int size)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;
	pmap_release(vmspace_pmap(vm));
	vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	pmap_pinit(vmspace_pmap(vm));
	return (0);
}

static void
vm_map_zfini(void *mem, int size)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	mtx_destroy(&map->system_mtx);
	sx_destroy(&map->lock);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	map->nentries = 0;
	map->size = 0;
	map->infork = 0;
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
	KASSERT(map->infork == 0,
	    ("map %p infork == %d on free.",
	    map, map->infork));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, min, max);
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	vm->vm_exitingcnt = 0;
	return (vm);
}

void
vm_init2(void)
{
	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
	    maxproc * 2 + maxfiles);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	pmap_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	vm_map_lock(&vm->vm_map);
	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);
	vm_map_unlock(&vm->vm_map);

	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
	int refcnt;

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	do
		refcnt = vm->vm_refcnt;
	while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1 && vm->vm_exitingcnt == 0)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;
	int exitingcnt;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
	 * may not be 0 (e.g. fork() and child exits without exec()ing).
	 * exitingcnt may increment above 0 and drop back down to zero
	 * several times while vm_refcnt is held non-zero.  vm_refcnt
	 * may also increment above 0 and drop back down to zero several
	 * times while vm_exitingcnt is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
	 */
	do
		exitingcnt = vm->vm_exitingcnt;
	while (!atomic_cmpset_int(&vm->vm_exitingcnt, exitingcnt,
	    exitingcnt - 1));
	if (vm->vm_refcnt == 0 && exitingcnt == 1)
		vmspace_dofree(vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else
		_sx_xlock(&map->lock, file, line);
	map->timestamp++;
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else
		_sx_xunlock(&map->lock, file, line);
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_lock_flags(&map->system_mtx, 0, file, line);
	else
		_sx_xlock(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		_mtx_unlock_flags(&map->system_mtx, 0, file, line);
	else
		_sx_xunlock(&map->lock, file, line);
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    !_sx_try_xlock(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !_mtx_trylock(&map->system_mtx, 0, file, line) :
	    !_sx_try_xlock(&map->lock, file, line);
	return (error == 0);
}

int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{

#ifdef INVARIANTS
	if (map->system_map) {
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
	} else
		_sx_assert(&map->lock, SX_XLOCKED, file, line);
#endif
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

#ifdef INVARIANTS
	if (map->system_map) {
		_mtx_assert(&map->system_mtx, MA_OWNED, file, line);
	} else
		_sx_assert(&map->lock, SX_XLOCKED, file, line);
#endif
}

/*
 *	vm_map_unlock_and_wait:
 */
int
vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
{

	mtx_lock(&map_sleep_mtx);
	vm_map_unlock(map);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
}

/*
 *	vm_map_wakeup:
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the vm_map_unlock()
	 * and the msleep() in vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

long
vmspace_wired_count(struct vmspace *vmspace)
{
	return pmap_wired_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
static void
_vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
}

void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
	_vm_map_init(map, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static __inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_map_entry containing the given address.  If, however, that
 *	address is not found in the vm_map, returns a vm_map_entry that is
 *	adjacent to the address, coming before or after it.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
{
	struct vm_map_entry dummy;
	vm_map_entry_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (address < root->start) {
			if ((y = root->left) == NULL)
				break;
			if (address < y->start) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (address >= root->end) {
			if ((y = root->right) == NULL)
				break;
			if (address >= y->end) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/*
	 * Assemble the new root.
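	 *
	 * Entries that precede the new root have been linked into the
	 * tree hanging off dummy.right (lefttreemax is its rightmost
	 * node), and entries that follow it into the tree hanging off
	 * dummy.left (righttreemin is its leftmost node).  Attach the
	 * root's remaining subtrees to those trees, then install the
	 * assembled trees as the new root's children.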
	 */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
    vm_map_entry_t after_where,
    vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;

	cur = vm_map_entry_splay(address, map->root);
	if (cur == NULL)
		*entry = &map->header;
	else {
		map->root = cur;

		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	}
	return (FALSE);
}

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
    int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
		    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_LOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
	    (prev_entry->eflags == protoeflags) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0) &&
	    ((prev_entry->object.vm_object == NULL) ||
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
		    object, OFF_TO_IDX(offset), end - start,
		    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(
	vm_map_t map,
	vm_offset_t start,
	vm_size_t length,
	vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	*addr = start;
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
		}
	}
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, boolean_t find_space, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
	    start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
		      (prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
		      (entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
{							\
	if (start < vm_map_min(map))			\
		start = vm_map_min(map);		\
	if (end > vm_map_max(map))			\
		end = vm_map_max(map);			\
	if (start > end)				\
		start = end;				\
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
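 *
 *	For illustration only (not a requirement of this interface):
 *	kernel submaps are normally set up by reserving a range in the
 *	parent map with vm_map_find(), creating the new map with
 *	vm_map_create(), and then calling vm_map_submap() on that same
 *	range so that faults within it are redirected to the subordinate
 *	map.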
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * The maximum number of pages to map
 */
#define	MAX_INIT_PT	96

/*
 *	vm_map_pmap_enter:
 *
 *	Preload read-only mappings for the given object into the specified
 *	map.  This eliminates the soft faults on process startup and
 *	immediately after an mmap(2).
 */
void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p, mpte;

	if ((prot & VM_PROT_READ) == 0 || object == NULL)
		return;
	mtx_lock(&Giant);
	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_DEVICE) {
		pmap_object_init_pt(map->pmap, addr, object, pindex, size);
		goto unlock_return;
	}

	psize = atop(size);

	if (object->type != OBJT_VNODE ||
	    ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
	    (object->resident_page_count > MAX_INIT_PT))) {
		goto unlock_return;
	}

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			goto unlock_return;
		psize = object->size - pindex;
	}

	mpte = NULL;

	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < pindex) {
			p = vm_page_splay(pindex, object->root);
			if ((object->root = p)->pindex < pindex)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * don't allow an madvise to blow away our really
		 * free pages allocating pv entries.
		 */
		if ((flags & MAP_PREFAULT_MADVISE) &&
		    cnt.v_free_count < cnt.v_free_reserved) {
			break;
		}
		vm_page_lock_queues();
		if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
		    (p->busy == 0) &&
		    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);
			vm_page_busy(p);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			mpte = pmap_enter_quick(map->pmap,
			    addr + ptoa(tmpidx), p, mpte);
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_wakeup(p);
		}
		vm_page_unlock_queues();
	}
unlock_return:
	VM_OBJECT_UNLOCK(object);
	mtx_unlock(&Giant);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
If "set_max" is 1340 * specified, the maximum protection is to be set; 1341 * otherwise, only the current protection is affected. 1342 */ 1343 int 1344 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1345 vm_prot_t new_prot, boolean_t set_max) 1346 { 1347 vm_map_entry_t current; 1348 vm_map_entry_t entry; 1349 1350 vm_map_lock(map); 1351 1352 VM_MAP_RANGE_CHECK(map, start, end); 1353 1354 if (vm_map_lookup_entry(map, start, &entry)) { 1355 vm_map_clip_start(map, entry, start); 1356 } else { 1357 entry = entry->next; 1358 } 1359 1360 /* 1361 * Make a first pass to check for protection violations. 1362 */ 1363 current = entry; 1364 while ((current != &map->header) && (current->start < end)) { 1365 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 1366 vm_map_unlock(map); 1367 return (KERN_INVALID_ARGUMENT); 1368 } 1369 if ((new_prot & current->max_protection) != new_prot) { 1370 vm_map_unlock(map); 1371 return (KERN_PROTECTION_FAILURE); 1372 } 1373 current = current->next; 1374 } 1375 1376 /* 1377 * Go back and fix up protections. [Note that clipping is not 1378 * necessary the second time.] 1379 */ 1380 current = entry; 1381 while ((current != &map->header) && (current->start < end)) { 1382 vm_prot_t old_prot; 1383 1384 vm_map_clip_end(map, current, end); 1385 1386 old_prot = current->protection; 1387 if (set_max) 1388 current->protection = 1389 (current->max_protection = new_prot) & 1390 old_prot; 1391 else 1392 current->protection = new_prot; 1393 1394 /* 1395 * Update physical map if necessary. Worry about copy-on-write 1396 * here -- CHECK THIS XXX 1397 */ 1398 if (current->protection != old_prot) { 1399 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1400 VM_PROT_ALL) 1401 pmap_protect(map->pmap, current->start, 1402 current->end, 1403 current->protection & MASK(current)); 1404 #undef MASK 1405 } 1406 vm_map_simplify_entry(map, current); 1407 current = current->next; 1408 } 1409 vm_map_unlock(map); 1410 return (KERN_SUCCESS); 1411 } 1412 1413 /* 1414 * vm_map_madvise: 1415 * 1416 * This routine traverses a processes map handling the madvise 1417 * system call. Advisories are classified as either those effecting 1418 * the vm_map_entry structure, or those effecting the underlying 1419 * objects. 1420 */ 1421 int 1422 vm_map_madvise( 1423 vm_map_t map, 1424 vm_offset_t start, 1425 vm_offset_t end, 1426 int behav) 1427 { 1428 vm_map_entry_t current, entry; 1429 int modify_map = 0; 1430 1431 /* 1432 * Some madvise calls directly modify the vm_map_entry, in which case 1433 * we need to use an exclusive lock on the map and we need to perform 1434 * various clipping operations. Otherwise we only need a read-lock 1435 * on the map. 1436 */ 1437 switch(behav) { 1438 case MADV_NORMAL: 1439 case MADV_SEQUENTIAL: 1440 case MADV_RANDOM: 1441 case MADV_NOSYNC: 1442 case MADV_AUTOSYNC: 1443 case MADV_NOCORE: 1444 case MADV_CORE: 1445 modify_map = 1; 1446 vm_map_lock(map); 1447 break; 1448 case MADV_WILLNEED: 1449 case MADV_DONTNEED: 1450 case MADV_FREE: 1451 vm_map_lock_read(map); 1452 break; 1453 default: 1454 return (KERN_INVALID_ARGUMENT); 1455 } 1456 1457 /* 1458 * Locate starting entry and clip if necessary. 1459 */ 1460 VM_MAP_RANGE_CHECK(map, start, end); 1461 1462 if (vm_map_lookup_entry(map, start, &entry)) { 1463 if (modify_map) 1464 vm_map_clip_start(map, entry, start); 1465 } else { 1466 entry = entry->next; 1467 } 1468 1469 if (modify_map) { 1470 /* 1471 * madvise behaviors that are implemented in the vm_map_entry. 
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
			    pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				vm_map_pmap_enter(map,
				    useStart,
				    current->protection,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    MAP_PREFAULT_MADVISE
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return (0);
}


/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
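 *
 *	(For reference: VM_INHERIT_SHARE shares the range with the child,
 *	VM_INHERIT_COPY gives the child a copy-on-write copy, and
 *	VM_INHERIT_NONE leaves the range unmapped in the child.)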
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_inherit_t new_inheritance)
{
	vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);
		entry->inheritance = new_inheritance;
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 *	vm_map_unwire:
 *
 *	Implements both kernel and user unwiring.
 */
int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_unwire;

	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_unwire)) {
				/*
				 * Allow interruption of user unwiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * First_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
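		 * (A hole is a gap between this entry's end and the start
		 * of the next entry that still lies within the requested
		 * range.)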
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		/*
		 * If system unwiring, require that the entry is system wired.
		 */
		if (!user_unwire && entry->wired_count < ((entry->eflags &
		    MAP_ENTRY_USER_WIRED) ? 2 : 1)) {
			end = entry->end;
			rv = KERN_INVALID_ARGUMENT;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS && (!user_unwire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
			if (user_unwire)
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end,
				    entry->object.vm_object != NULL &&
				    entry->object.vm_object->type == OBJT_DEVICE);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
		    ("vm_map_unwire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

/*
 *	vm_map_wire:
 *
 *	Implements both kernel and user wiring.
 */
int
vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_end, saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t fictitious, need_wakeup, result, user_wire;

	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, user_wire)) {
				/*
				 * Allow interruption of user wiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * first_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		/*
		 * If the entry has not been wired before, wire it now.
		 */
		if (entry->wired_count == 0) {
			entry->wired_count++;
			saved_start = entry->start;
			saved_end = entry->end;
			fictitious = entry->object.vm_object != NULL &&
			    entry->object.vm_object->type == OBJT_DEVICE;
			/*
			 * Release the map lock, relying on the in-transition
			 * mark.
			 */
			vm_map_unlock(map);
			rv = vm_fault_wire(map, saved_start, saved_end,
			    user_wire, fictitious);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.  The entry
				 * may have been clipped, but NOT merged or
				 * deleted.
				 */
				result = vm_map_lookup_entry(map, saved_start,
				    &tmp_entry);
				KASSERT(result, ("vm_map_wire: lookup failed"));
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
				while (entry->end < saved_end) {
					if (rv != KERN_SUCCESS) {
						KASSERT(entry->wired_count == 1,
						    ("vm_map_wire: bad count"));
						entry->wired_count = -1;
					}
					entry = entry->next;
				}
			}
			last_timestamp = map->timestamp;
			if (rv != KERN_SUCCESS) {
				KASSERT(entry->wired_count == 1,
				    ("vm_map_wire: bad count"));
				/*
				 * Assign an out-of-range value to represent
				 * the failure to wire this entry.
				 */
				entry->wired_count = -1;
				end = entry->end;
				goto done;
			}
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			entry->wired_count++;
		}
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_wire: lookup failed"));
	}
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (rv == KERN_SUCCESS) {
			if (user_wire)
				entry->eflags |= MAP_ENTRY_USER_WIRED;
		} else if (entry->wired_count == -1) {
			/*
			 * Wiring failed on this entry.  Thus, unwiring is
			 * unnecessary.
			 */
			entry->wired_count = 0;
		} else {
			if (!user_wire ||
			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
				entry->wired_count--;
			if (entry->wired_count == 0) {
				/*
				 * Retain the map lock.
				 */
				vm_fault_unwire(map, entry->start, entry->end,
				    entry->object.vm_object != NULL &&
				    entry->object.vm_object->type == OBJT_DEVICE);
			}
		}
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
		    ("vm_map_wire: in-transition flag missing"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
		entry = entry->next;
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}

/*
 * vm_map_sync
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * If the size of the region from start to end is zero, we are
 * supposed to flush all modified pages within the region containing
 * start.  Unfortunately, a region can be split or coalesced with
 * neighboring regions, making it difficult to determine what the
 * original region was.  Therefore, we approximate this requirement by
 * flushing the current region containing start.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_sync(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	} else if (start == end) {
		start = entry->start;
		end = entry->end;
	}
	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		    current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate) {
		mtx_lock(&Giant);
		pmap_remove(map->pmap, start, end);
		mtx_unlock(&Giant);
	}
	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
end : current->end) - start; 2022 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2023 vm_map_t smap; 2024 vm_map_entry_t tentry; 2025 vm_size_t tsize; 2026 2027 smap = current->object.sub_map; 2028 vm_map_lock_read(smap); 2029 (void) vm_map_lookup_entry(smap, offset, &tentry); 2030 tsize = tentry->end - offset; 2031 if (tsize < size) 2032 size = tsize; 2033 object = tentry->object.vm_object; 2034 offset = tentry->offset + (offset - tentry->start); 2035 vm_map_unlock_read(smap); 2036 } else { 2037 object = current->object.vm_object; 2038 } 2039 vm_object_sync(object, offset, size, syncio, invalidate); 2040 start += size; 2041 } 2042 2043 vm_map_unlock_read(map); 2044 return (KERN_SUCCESS); 2045 } 2046 2047 /* 2048 * vm_map_entry_unwire: [ internal use only ] 2049 * 2050 * Make the region specified by this entry pageable. 2051 * 2052 * The map in question should be locked. 2053 * [This is the reason for this routine's existence.] 2054 */ 2055 static void 2056 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2057 { 2058 vm_fault_unwire(map, entry->start, entry->end, 2059 entry->object.vm_object != NULL && 2060 entry->object.vm_object->type == OBJT_DEVICE); 2061 entry->wired_count = 0; 2062 } 2063 2064 /* 2065 * vm_map_entry_delete: [ internal use only ] 2066 * 2067 * Deallocate the given entry from the target map. 2068 */ 2069 static void 2070 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 2071 { 2072 vm_object_t object; 2073 vm_pindex_t offidxstart, offidxend, count; 2074 2075 vm_map_entry_unlink(map, entry); 2076 map->size -= entry->end - entry->start; 2077 2078 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2079 (object = entry->object.vm_object) != NULL) { 2080 count = OFF_TO_IDX(entry->end - entry->start); 2081 offidxstart = OFF_TO_IDX(entry->offset); 2082 offidxend = offidxstart + count; 2083 VM_OBJECT_LOCK(object); 2084 if (object->ref_count != 1 && 2085 ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || 2086 object == kernel_object || object == kmem_object) && 2087 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2088 vm_object_collapse(object); 2089 vm_object_page_remove(object, offidxstart, offidxend, FALSE); 2090 if (object->type == OBJT_SWAP) 2091 swap_pager_freespace(object, offidxstart, count); 2092 if (offidxend >= object->size && 2093 offidxstart < object->size) 2094 object->size = offidxstart; 2095 } 2096 VM_OBJECT_UNLOCK(object); 2097 vm_object_deallocate(object); 2098 } 2099 2100 vm_map_entry_dispose(map, entry); 2101 } 2102 2103 /* 2104 * vm_map_delete: [ internal use only ] 2105 * 2106 * Deallocates the given address range from the target 2107 * map. 2108 */ 2109 int 2110 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 2111 { 2112 vm_map_entry_t entry; 2113 vm_map_entry_t first_entry; 2114 2115 /* 2116 * Find the start of the region, and clip it 2117 */ 2118 if (!vm_map_lookup_entry(map, start, &first_entry)) 2119 entry = first_entry->next; 2120 else { 2121 entry = first_entry; 2122 vm_map_clip_start(map, entry, start); 2123 } 2124 2125 /* 2126 * Save the free space hint 2127 */ 2128 if (entry == &map->header) { 2129 map->first_free = &map->header; 2130 } else if (map->first_free->start >= start) { 2131 map->first_free = entry->prev; 2132 } 2133 2134 /* 2135 * Step through all entries in this region 2136 */ 2137 while ((entry != &map->header) && (entry->start < end)) { 2138 vm_map_entry_t next; 2139 2140 /* 2141 * Wait for wiring or unwiring of an entry to complete. 
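 * Another thread has marked the entry MAP_ENTRY_IN_TRANSITION, so
 * request a wakeup, sleep via vm_map_unlock_and_wait(), and retry.
 * The timestamp comparison below detects whether the map changed
 * while it was unlocked; if so, the entry must be looked up again
 * before the deletion loop continues.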
2142 */ 2143 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) { 2144 unsigned int last_timestamp; 2145 vm_offset_t saved_start; 2146 vm_map_entry_t tmp_entry; 2147 2148 saved_start = entry->start; 2149 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2150 last_timestamp = map->timestamp; 2151 (void) vm_map_unlock_and_wait(map, FALSE); 2152 vm_map_lock(map); 2153 if (last_timestamp + 1 != map->timestamp) { 2154 /* 2155 * Look again for the entry because the map was 2156 * modified while it was unlocked. 2157 * Specifically, the entry may have been 2158 * clipped, merged, or deleted. 2159 */ 2160 if (!vm_map_lookup_entry(map, saved_start, 2161 &tmp_entry)) 2162 entry = tmp_entry->next; 2163 else { 2164 entry = tmp_entry; 2165 vm_map_clip_start(map, entry, 2166 saved_start); 2167 } 2168 } 2169 continue; 2170 } 2171 vm_map_clip_end(map, entry, end); 2172 2173 next = entry->next; 2174 2175 /* 2176 * Unwire before removing addresses from the pmap; otherwise, 2177 * unwiring will put the entries back in the pmap. 2178 */ 2179 if (entry->wired_count != 0) { 2180 vm_map_entry_unwire(map, entry); 2181 } 2182 2183 if (!map->system_map) 2184 mtx_lock(&Giant); 2185 pmap_remove(map->pmap, entry->start, entry->end); 2186 if (!map->system_map) 2187 mtx_unlock(&Giant); 2188 2189 /* 2190 * Delete the entry (which may delete the object) only after 2191 * removing all pmap entries pointing to its pages. 2192 * (Otherwise, its page frames may be reallocated, and any 2193 * modify bits will be set in the wrong object!) 2194 */ 2195 vm_map_entry_delete(map, entry); 2196 entry = next; 2197 } 2198 return (KERN_SUCCESS); 2199 } 2200 2201 /* 2202 * vm_map_remove: 2203 * 2204 * Remove the given address range from the target map. 2205 * This is the exported form of vm_map_delete. 2206 */ 2207 int 2208 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2209 { 2210 int result, s = 0; 2211 2212 if (map == kmem_map) 2213 s = splvm(); 2214 2215 vm_map_lock(map); 2216 VM_MAP_RANGE_CHECK(map, start, end); 2217 result = vm_map_delete(map, start, end); 2218 vm_map_unlock(map); 2219 2220 if (map == kmem_map) 2221 splx(s); 2222 2223 return (result); 2224 } 2225 2226 /* 2227 * vm_map_check_protection: 2228 * 2229 * Assert that the target map allows the specified privilege on the 2230 * entire address region given. The entire region must be allocated. 2231 * 2232 * WARNING! This code does not and should not check whether the 2233 * contents of the region is accessible. For example a smaller file 2234 * might be mapped into a larger address space. 2235 * 2236 * NOTE! This code is also called by munmap(). 2237 * 2238 * The map must be locked. A read lock is sufficient. 2239 */ 2240 boolean_t 2241 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2242 vm_prot_t protection) 2243 { 2244 vm_map_entry_t entry; 2245 vm_map_entry_t tmp_entry; 2246 2247 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 2248 return (FALSE); 2249 entry = tmp_entry; 2250 2251 while (start < end) { 2252 if (entry == &map->header) 2253 return (FALSE); 2254 /* 2255 * No holes allowed! 2256 */ 2257 if (start < entry->start) 2258 return (FALSE); 2259 /* 2260 * Check protection associated with entry. 2261 */ 2262 if ((entry->protection & protection) != protection) 2263 return (FALSE); 2264 /* go to next entry */ 2265 start = entry->end; 2266 entry = entry->next; 2267 } 2268 return (TRUE); 2269 } 2270 2271 /* 2272 * vm_map_copy_entry: 2273 * 2274 * Copies the contents of the source entry to the destination 2275 * entry. 
The entries *must* be aligned properly. 2276 */ 2277 static void 2278 vm_map_copy_entry( 2279 vm_map_t src_map, 2280 vm_map_t dst_map, 2281 vm_map_entry_t src_entry, 2282 vm_map_entry_t dst_entry) 2283 { 2284 vm_object_t src_object; 2285 2286 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 2287 return; 2288 2289 if (src_entry->wired_count == 0) { 2290 2291 /* 2292 * If the source entry is marked needs_copy, it is already 2293 * write-protected. 2294 */ 2295 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2296 pmap_protect(src_map->pmap, 2297 src_entry->start, 2298 src_entry->end, 2299 src_entry->protection & ~VM_PROT_WRITE); 2300 } 2301 2302 /* 2303 * Make a copy of the object. 2304 */ 2305 if ((src_object = src_entry->object.vm_object) != NULL) { 2306 VM_OBJECT_LOCK(src_object); 2307 if ((src_object->handle == NULL) && 2308 (src_object->type == OBJT_DEFAULT || 2309 src_object->type == OBJT_SWAP)) { 2310 vm_object_collapse(src_object); 2311 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2312 vm_object_split(src_entry); 2313 src_object = src_entry->object.vm_object; 2314 } 2315 } 2316 vm_object_reference_locked(src_object); 2317 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2318 VM_OBJECT_UNLOCK(src_object); 2319 dst_entry->object.vm_object = src_object; 2320 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2321 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2322 dst_entry->offset = src_entry->offset; 2323 } else { 2324 dst_entry->object.vm_object = NULL; 2325 dst_entry->offset = 0; 2326 } 2327 2328 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 2329 dst_entry->end - dst_entry->start, src_entry->start); 2330 } else { 2331 /* 2332 * Of course, wired down pages can't be set copy-on-write. 2333 * Cause wired pages to be copied into the new map by 2334 * simulating faults (the new pages are pageable) 2335 */ 2336 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 2337 } 2338 } 2339 2340 /* 2341 * vmspace_map_entry_forked: 2342 * Update the newly-forked vmspace each time a map entry is inherited 2343 * or copied. The values for vm_dsize and vm_tsize are approximate 2344 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 2345 */ 2346 static void 2347 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 2348 vm_map_entry_t entry) 2349 { 2350 vm_size_t entrysize; 2351 vm_offset_t newend; 2352 2353 entrysize = entry->end - entry->start; 2354 vm2->vm_map.size += entrysize; 2355 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 2356 vm2->vm_ssize += btoc(entrysize); 2357 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 2358 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 2359 newend = MIN(entry->end, 2360 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 2361 vm2->vm_dsize += btoc(newend - entry->start); 2362 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 2363 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 2364 newend = MIN(entry->end, 2365 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 2366 vm2->vm_tsize += btoc(newend - entry->start); 2367 } 2368 } 2369 2370 /* 2371 * vmspace_fork: 2372 * Create a new process vmspace structure and vm_map 2373 * based on those of an existing process. The new map 2374 * is based on the old map, according to the inheritance 2375 * values on the regions in that map. 2376 * 2377 * XXX It might be worth coalescing the entries added to the new vmspace. 
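 *
 * Each entry is handled according to its inheritance attribute,
 * typically set from userland with minherit(2): VM_INHERIT_SHARE
 * clones the entry with a new reference on the same VM object,
 * VM_INHERIT_COPY sets the region up copy-on-write through
 * vm_map_copy_entry(), and VM_INHERIT_NONE entries are skipped.
 *
 * A rough userland sketch (addr and len are hypothetical):
 *
 *	minherit(addr, len, INHERIT_SHARE);	share writes with child
 *	minherit(addr, len, INHERIT_NONE);	child gets no mapping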
2378 * 2379 * The source map must not be locked. 2380 */ 2381 struct vmspace * 2382 vmspace_fork(struct vmspace *vm1) 2383 { 2384 struct vmspace *vm2; 2385 vm_map_t old_map = &vm1->vm_map; 2386 vm_map_t new_map; 2387 vm_map_entry_t old_entry; 2388 vm_map_entry_t new_entry; 2389 vm_object_t object; 2390 2391 GIANT_REQUIRED; 2392 2393 vm_map_lock(old_map); 2394 old_map->infork = 1; 2395 2396 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 2397 vm2->vm_taddr = vm1->vm_taddr; 2398 vm2->vm_daddr = vm1->vm_daddr; 2399 vm2->vm_maxsaddr = vm1->vm_maxsaddr; 2400 new_map = &vm2->vm_map; /* XXX */ 2401 new_map->timestamp = 1; 2402 2403 /* Do not inherit the MAP_WIREFUTURE property. */ 2404 if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) 2405 new_map->flags &= ~MAP_WIREFUTURE; 2406 2407 old_entry = old_map->header.next; 2408 2409 while (old_entry != &old_map->header) { 2410 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) 2411 panic("vm_map_fork: encountered a submap"); 2412 2413 switch (old_entry->inheritance) { 2414 case VM_INHERIT_NONE: 2415 break; 2416 2417 case VM_INHERIT_SHARE: 2418 /* 2419 * Clone the entry, creating the shared object if necessary. 2420 */ 2421 object = old_entry->object.vm_object; 2422 if (object == NULL) { 2423 object = vm_object_allocate(OBJT_DEFAULT, 2424 atop(old_entry->end - old_entry->start)); 2425 old_entry->object.vm_object = object; 2426 old_entry->offset = (vm_offset_t) 0; 2427 } 2428 2429 /* 2430 * Add the reference before calling vm_object_shadow 2431 * to insure that a shadow object is created. 2432 */ 2433 vm_object_reference(object); 2434 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2435 vm_object_shadow(&old_entry->object.vm_object, 2436 &old_entry->offset, 2437 atop(old_entry->end - old_entry->start)); 2438 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2439 /* Transfer the second reference too. */ 2440 vm_object_reference( 2441 old_entry->object.vm_object); 2442 vm_object_deallocate(object); 2443 object = old_entry->object.vm_object; 2444 } 2445 VM_OBJECT_LOCK(object); 2446 vm_object_clear_flag(object, OBJ_ONEMAPPING); 2447 VM_OBJECT_UNLOCK(object); 2448 2449 /* 2450 * Clone the entry, referencing the shared object. 2451 */ 2452 new_entry = vm_map_entry_create(new_map); 2453 *new_entry = *old_entry; 2454 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2455 new_entry->wired_count = 0; 2456 2457 /* 2458 * Insert the entry into the new map -- we know we're 2459 * inserting at the end of the new map. 2460 */ 2461 vm_map_entry_link(new_map, new_map->header.prev, 2462 new_entry); 2463 vmspace_map_entry_forked(vm1, vm2, new_entry); 2464 2465 /* 2466 * Update the physical map 2467 */ 2468 pmap_copy(new_map->pmap, old_map->pmap, 2469 new_entry->start, 2470 (old_entry->end - old_entry->start), 2471 old_entry->start); 2472 break; 2473 2474 case VM_INHERIT_COPY: 2475 /* 2476 * Clone the entry and link into the map. 
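 * The clone starts with a NULL object; vm_map_copy_entry() below
 * then either shares the parent's object copy-on-write (marking
 * both entries MAP_ENTRY_NEEDS_COPY) or, for wired entries, copies
 * the pages immediately via vm_fault_copy_entry().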
2477 */ 2478 new_entry = vm_map_entry_create(new_map); 2479 *new_entry = *old_entry; 2480 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2481 new_entry->wired_count = 0; 2482 new_entry->object.vm_object = NULL; 2483 vm_map_entry_link(new_map, new_map->header.prev, 2484 new_entry); 2485 vmspace_map_entry_forked(vm1, vm2, new_entry); 2486 vm_map_copy_entry(old_map, new_map, old_entry, 2487 new_entry); 2488 break; 2489 } 2490 old_entry = old_entry->next; 2491 } 2492 2493 old_map->infork = 0; 2494 vm_map_unlock(old_map); 2495 2496 return (vm2); 2497 } 2498 2499 int 2500 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 2501 vm_prot_t prot, vm_prot_t max, int cow) 2502 { 2503 vm_map_entry_t new_entry, prev_entry; 2504 vm_offset_t bot, top; 2505 vm_size_t init_ssize; 2506 int orient, rv; 2507 rlim_t vmemlim; 2508 2509 /* 2510 * The stack orientation is piggybacked with the cow argument. 2511 * Extract it into orient and mask the cow argument so that we 2512 * don't pass it around further. 2513 * NOTE: We explicitly allow bi-directional stacks. 2514 */ 2515 orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); 2516 cow &= ~orient; 2517 KASSERT(orient != 0, ("No stack grow direction")); 2518 2519 if (addrbos < vm_map_min(map) || addrbos > map->max_offset) 2520 return (KERN_NO_SPACE); 2521 2522 init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz; 2523 2524 PROC_LOCK(curthread->td_proc); 2525 vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM); 2526 PROC_UNLOCK(curthread->td_proc); 2527 2528 vm_map_lock(map); 2529 2530 /* If addr is already mapped, no go */ 2531 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 2532 vm_map_unlock(map); 2533 return (KERN_NO_SPACE); 2534 } 2535 2536 /* If we would blow our VMEM resource limit, no go */ 2537 if (map->size + init_ssize > vmemlim) { 2538 vm_map_unlock(map); 2539 return (KERN_NO_SPACE); 2540 } 2541 2542 /* 2543 * If we can't accommodate max_ssize in the current mapping, no go. 2544 * However, we need to be aware that subsequent user mappings might 2545 * map into the space we have reserved for stack, and currently this 2546 * space is not protected. 2547 * 2548 * Hopefully we will at least detect this condition when we try to 2549 * grow the stack. 2550 */ 2551 if ((prev_entry->next != &map->header) && 2552 (prev_entry->next->start < addrbos + max_ssize)) { 2553 vm_map_unlock(map); 2554 return (KERN_NO_SPACE); 2555 } 2556 2557 /* 2558 * We initially map a stack of only init_ssize. We will grow as 2559 * needed later. Depending on the orientation of the stack (i.e. 2560 * the grow direction) we either map at the top of the range, the 2561 * bottom of the range or in the middle. 2562 * 2563 * Note: we would normally expect prot and max to be VM_PROT_ALL, 2564 * and cow to be 0. Possibly we should eliminate these as input 2565 * parameters, and just pass these values here in the insert call. 2566 */ 2567 if (orient == MAP_STACK_GROWS_DOWN) 2568 bot = addrbos + max_ssize - init_ssize; 2569 else if (orient == MAP_STACK_GROWS_UP) 2570 bot = addrbos; 2571 else 2572 bot = round_page(addrbos + max_ssize/2 - init_ssize/2); 2573 top = bot + init_ssize; 2574 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 2575 2576 /* Now set the avail_ssize amount.
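 * avail_ssize is the reserve left for future growth; vm_map_growstack()
 * draws it down as the stack faults in more pages.
 *
 * A minimal caller sketch (addrbos and maxssz are caller-supplied
 * values): a downward-growing stack occupying at most the range
 * [addrbos, addrbos + maxssz) might be reserved with
 *
 *	rv = vm_map_stack(map, addrbos, maxssz, VM_PROT_ALL,
 *	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN);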
*/ 2577 if (rv == KERN_SUCCESS) { 2578 if (prev_entry != &map->header) 2579 vm_map_clip_end(map, prev_entry, bot); 2580 new_entry = prev_entry->next; 2581 if (new_entry->end != top || new_entry->start != bot) 2582 panic("Bad entry start/end for new stack entry"); 2583 2584 new_entry->avail_ssize = max_ssize - init_ssize; 2585 if (orient & MAP_STACK_GROWS_DOWN) 2586 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2587 if (orient & MAP_STACK_GROWS_UP) 2588 new_entry->eflags |= MAP_ENTRY_GROWS_UP; 2589 } 2590 2591 vm_map_unlock(map); 2592 return (rv); 2593 } 2594 2595 /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 2596 * desired address is already mapped, or if we successfully grow 2597 * the stack. Also returns KERN_SUCCESS if addr is outside the 2598 * stack range (this is strange, but preserves compatibility with 2599 * the grow function in vm_machdep.c). 2600 */ 2601 int 2602 vm_map_growstack(struct proc *p, vm_offset_t addr) 2603 { 2604 vm_map_entry_t next_entry, prev_entry; 2605 vm_map_entry_t new_entry, stack_entry; 2606 struct vmspace *vm = p->p_vmspace; 2607 vm_map_t map = &vm->vm_map; 2608 vm_offset_t end; 2609 size_t grow_amount, max_grow; 2610 rlim_t stacklim, vmemlim; 2611 int is_procstack, rv; 2612 2613 Retry: 2614 PROC_LOCK(p); 2615 stacklim = lim_cur(p, RLIMIT_STACK); 2616 vmemlim = lim_cur(p, RLIMIT_VMEM); 2617 PROC_UNLOCK(p); 2618 2619 vm_map_lock_read(map); 2620 2621 /* If addr is already in the entry range, no need to grow.*/ 2622 if (vm_map_lookup_entry(map, addr, &prev_entry)) { 2623 vm_map_unlock_read(map); 2624 return (KERN_SUCCESS); 2625 } 2626 2627 next_entry = prev_entry->next; 2628 if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { 2629 /* 2630 * This entry does not grow upwards. Since the address lies 2631 * beyond this entry, the next entry (if one exists) has to 2632 * be a downward growable entry. The entry list header is 2633 * never a growable entry, so it suffices to check the flags. 2634 */ 2635 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { 2636 vm_map_unlock_read(map); 2637 return (KERN_SUCCESS); 2638 } 2639 stack_entry = next_entry; 2640 } else { 2641 /* 2642 * This entry grows upward. If the next entry does not at 2643 * least grow downwards, this is the entry we need to grow. 2644 * otherwise we have two possible choices and we have to 2645 * select one. 2646 */ 2647 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { 2648 /* 2649 * We have two choices; grow the entry closest to 2650 * the address to minimize the amount of growth. 2651 */ 2652 if (addr - prev_entry->end <= next_entry->start - addr) 2653 stack_entry = prev_entry; 2654 else 2655 stack_entry = next_entry; 2656 } else 2657 stack_entry = prev_entry; 2658 } 2659 2660 if (stack_entry == next_entry) { 2661 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); 2662 KASSERT(addr < stack_entry->start, ("foo")); 2663 end = (prev_entry != &map->header) ? prev_entry->end : 2664 stack_entry->start - stack_entry->avail_ssize; 2665 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); 2666 max_grow = stack_entry->start - end; 2667 } else { 2668 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); 2669 KASSERT(addr >= stack_entry->end, ("foo")); 2670 end = (next_entry != &map->header) ? 
next_entry->start : 2671 stack_entry->end + stack_entry->avail_ssize; 2672 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); 2673 max_grow = end - stack_entry->end; 2674 } 2675 2676 if (grow_amount > stack_entry->avail_ssize) { 2677 vm_map_unlock_read(map); 2678 return (KERN_NO_SPACE); 2679 } 2680 2681 /* 2682 * If there is no longer enough space between the entries, fail and 2683 * adjust the available space. Note: this should only happen if the 2684 * user has mapped into the stack area after the stack was created, 2685 * and is probably an error. 2686 * 2687 * This also effectively destroys any guard page the user might have 2688 * intended by limiting the stack size. 2689 */ 2690 if (grow_amount > max_grow) { 2691 if (vm_map_lock_upgrade(map)) 2692 goto Retry; 2693 2694 stack_entry->avail_ssize = max_grow; 2695 2696 vm_map_unlock(map); 2697 return (KERN_NO_SPACE); 2698 } 2699 2700 is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; 2701 2702 /* 2703 * If this is the main process stack, see if we're over the stack 2704 * limit. 2705 */ 2706 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 2707 vm_map_unlock_read(map); 2708 return (KERN_NO_SPACE); 2709 } 2710 2711 /* Round up the grow amount to a multiple of sgrowsiz */ 2712 grow_amount = roundup (grow_amount, sgrowsiz); 2713 if (grow_amount > stack_entry->avail_ssize) 2714 grow_amount = stack_entry->avail_ssize; 2715 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 2716 grow_amount = stacklim - ctob(vm->vm_ssize); 2717 } 2718 2719 /* If we would blow our VMEM resource limit, no go */ 2720 if (map->size + grow_amount > vmemlim) { 2721 vm_map_unlock_read(map); 2722 return (KERN_NO_SPACE); 2723 } 2724 2725 if (vm_map_lock_upgrade(map)) 2726 goto Retry; 2727 2728 if (stack_entry == next_entry) { 2729 /* 2730 * Growing downward. 2731 */ 2732 /* Get the preliminary new entry start value */ 2733 addr = stack_entry->start - grow_amount; 2734 2735 /* 2736 * If this puts us into the previous entry, cut back our 2737 * growth to the available space. Also, see the note above. 2738 */ 2739 if (addr < end) { 2740 stack_entry->avail_ssize = max_grow; 2741 addr = end; 2742 } 2743 2744 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, 2745 p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); 2746 2747 /* Adjust the available stack space by the amount we grew. */ 2748 if (rv == KERN_SUCCESS) { 2749 if (prev_entry != &map->header) 2750 vm_map_clip_end(map, prev_entry, addr); 2751 new_entry = prev_entry->next; 2752 KASSERT(new_entry == stack_entry->prev, ("foo")); 2753 KASSERT(new_entry->end == stack_entry->start, ("foo")); 2754 KASSERT(new_entry->start == addr, ("foo")); 2755 grow_amount = new_entry->end - new_entry->start; 2756 new_entry->avail_ssize = stack_entry->avail_ssize - 2757 grow_amount; 2758 stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; 2759 new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; 2760 } 2761 } else { 2762 /* 2763 * Growing upward. 2764 */ 2765 addr = stack_entry->end + grow_amount; 2766 2767 /* 2768 * If this puts us into the next entry, cut back our growth 2769 * to the available space. Also, see the note above. 2770 */ 2771 if (addr > end) { 2772 stack_entry->avail_ssize = end - stack_entry->end; 2773 addr = end; 2774 } 2775 2776 grow_amount = addr - stack_entry->end; 2777 2778 /* Grow the underlying object if applicable.
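 * When growing upward no new map entry is created; if the existing
 * entry has a backing object, vm_object_coalesce() must agree to
 * extend it, and on success the entry's end address is simply
 * advanced. Otherwise the growth fails with KERN_FAILURE.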
*/ 2779 if (stack_entry->object.vm_object == NULL || 2780 vm_object_coalesce(stack_entry->object.vm_object, 2781 stack_entry->offset, 2782 (vm_size_t)(stack_entry->end - stack_entry->start), 2783 (vm_size_t)grow_amount)) { 2784 map->size += (addr - stack_entry->end); 2785 /* Update the current entry. */ 2786 stack_entry->end = addr; 2787 stack_entry->avail_ssize -= grow_amount; 2788 rv = KERN_SUCCESS; 2789 2790 if (next_entry != &map->header) 2791 vm_map_clip_start(map, next_entry, addr); 2792 } else 2793 rv = KERN_FAILURE; 2794 } 2795 2796 if (rv == KERN_SUCCESS && is_procstack) 2797 vm->vm_ssize += btoc(grow_amount); 2798 2799 vm_map_unlock(map); 2800 2801 /* 2802 * Heed the MAP_WIREFUTURE flag if it was set for this process. 2803 */ 2804 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { 2805 vm_map_wire(map, 2806 (stack_entry == next_entry) ? addr : addr - grow_amount, 2807 (stack_entry == next_entry) ? stack_entry->start : addr, 2808 (p->p_flag & P_SYSTEM) 2809 ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES 2810 : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 2811 } 2812 2813 return (rv); 2814 } 2815 2816 /* 2817 * Unshare the specified VM space for exec. If other processes are 2818 * mapped to it, then create a new one. The new vmspace is empty. 2819 */ 2820 void 2821 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) 2822 { 2823 struct vmspace *oldvmspace = p->p_vmspace; 2824 struct vmspace *newvmspace; 2825 2826 GIANT_REQUIRED; 2827 newvmspace = vmspace_alloc(minuser, maxuser); 2828 newvmspace->vm_swrss = oldvmspace->vm_swrss; 2829 /* 2830 * This code is written like this for prototype purposes. The 2831 * goal is to avoid running down the vmspace here, but let the 2832 * other processes that are still using the vmspace finally 2833 * run it down. Even though there is little or no chance of blocking 2834 * here, it is a good idea to keep this form for future mods. 2835 */ 2836 p->p_vmspace = newvmspace; 2837 if (p == curthread->td_proc) /* XXXKSE ? */ 2838 pmap_activate(curthread); 2839 vmspace_free(oldvmspace); 2840 } 2841 2842 /* 2843 * Unshare the specified VM space for forcing COW. This 2844 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2845 */ 2846 void 2847 vmspace_unshare(struct proc *p) 2848 { 2849 struct vmspace *oldvmspace = p->p_vmspace; 2850 struct vmspace *newvmspace; 2851 2852 GIANT_REQUIRED; 2853 if (oldvmspace->vm_refcnt == 1) 2854 return; 2855 newvmspace = vmspace_fork(oldvmspace); 2856 p->p_vmspace = newvmspace; 2857 if (p == curthread->td_proc) /* XXXKSE ? */ 2858 pmap_activate(curthread); 2859 vmspace_free(oldvmspace); 2860 } 2861 2862 /* 2863 * vm_map_lookup: 2864 * 2865 * Finds the VM object, offset, and 2866 * protection for a given virtual address in the 2867 * specified map, assuming a page fault of the 2868 * type specified. 2869 * 2870 * Leaves the map in question locked for read; return 2871 * values are guaranteed until a vm_map_lookup_done 2872 * call is performed. Note that the map argument 2873 * is in/out; the returned map must be used in 2874 * the call to vm_map_lookup_done. 2875 * 2876 * A handle (out_entry) is returned for use in 2877 * vm_map_lookup_done, to make that fast. 2878 * 2879 * If a lookup is requested with "write protection" 2880 * specified, the map may be changed to perform virtual 2881 * copying operations, although the data referenced will 2882 * remain the same.
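 *
 * A minimal caller sketch, in the style of vm_fault(); the names
 * used are the OUT parameters declared in the prototype below:
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... resolve the page at (object, pindex) ...
 *	vm_map_lookup_done(map, entry);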
2883 */ 2884 int 2885 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 2886 vm_offset_t vaddr, 2887 vm_prot_t fault_typea, 2888 vm_map_entry_t *out_entry, /* OUT */ 2889 vm_object_t *object, /* OUT */ 2890 vm_pindex_t *pindex, /* OUT */ 2891 vm_prot_t *out_prot, /* OUT */ 2892 boolean_t *wired) /* OUT */ 2893 { 2894 vm_map_entry_t entry; 2895 vm_map_t map = *var_map; 2896 vm_prot_t prot; 2897 vm_prot_t fault_type = fault_typea; 2898 2899 RetryLookup:; 2900 /* 2901 * Lookup the faulting address. 2902 */ 2903 2904 vm_map_lock_read(map); 2905 #define RETURN(why) \ 2906 { \ 2907 vm_map_unlock_read(map); \ 2908 return (why); \ 2909 } 2910 2911 /* 2912 * If the map has an interesting hint, try it before calling full 2913 * blown lookup routine. 2914 */ 2915 entry = map->root; 2916 *out_entry = entry; 2917 if (entry == NULL || 2918 (vaddr < entry->start) || (vaddr >= entry->end)) { 2919 /* 2920 * Entry was either not a valid hint, or the vaddr was not 2921 * contained in the entry, so do a full lookup. 2922 */ 2923 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 2924 RETURN(KERN_INVALID_ADDRESS); 2925 2926 entry = *out_entry; 2927 } 2928 2929 /* 2930 * Handle submaps. 2931 */ 2932 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2933 vm_map_t old_map = map; 2934 2935 *var_map = map = entry->object.sub_map; 2936 vm_map_unlock_read(old_map); 2937 goto RetryLookup; 2938 } 2939 2940 /* 2941 * Check whether this task is allowed to have this page. 2942 * Note the special case for MAP_ENTRY_COW 2943 * pages with an override. This is to implement a forced 2944 * COW for debuggers. 2945 */ 2946 if (fault_type & VM_PROT_OVERRIDE_WRITE) 2947 prot = entry->max_protection; 2948 else 2949 prot = entry->protection; 2950 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 2951 if ((fault_type & prot) != fault_type) { 2952 RETURN(KERN_PROTECTION_FAILURE); 2953 } 2954 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 2955 (entry->eflags & MAP_ENTRY_COW) && 2956 (fault_type & VM_PROT_WRITE) && 2957 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 2958 RETURN(KERN_PROTECTION_FAILURE); 2959 } 2960 2961 /* 2962 * If this page is not pageable, we have to get it for all possible 2963 * accesses. 2964 */ 2965 *wired = (entry->wired_count != 0); 2966 if (*wired) 2967 prot = fault_type = entry->protection; 2968 2969 /* 2970 * If the entry was copy-on-write, we either ... 2971 */ 2972 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 2973 /* 2974 * If we want to write the page, we may as well handle that 2975 * now since we've got the map locked. 2976 * 2977 * If we don't need to write the page, we just demote the 2978 * permissions allowed. 2979 */ 2980 if (fault_type & VM_PROT_WRITE) { 2981 /* 2982 * Make a new object, and place it in the object 2983 * chain. Note that no new references have appeared 2984 * -- one just moved from the map to the new 2985 * object. 2986 */ 2987 if (vm_map_lock_upgrade(map)) 2988 goto RetryLookup; 2989 2990 vm_object_shadow( 2991 &entry->object.vm_object, 2992 &entry->offset, 2993 atop(entry->end - entry->start)); 2994 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 2995 2996 vm_map_lock_downgrade(map); 2997 } else { 2998 /* 2999 * We're attempting to read a copy-on-write page -- 3000 * don't allow writes. 3001 */ 3002 prot &= ~VM_PROT_WRITE; 3003 } 3004 } 3005 3006 /* 3007 * Create an object if necessary. 
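 * Anonymous mappings may be entered with a NULL object; the backing
 * OBJT_DEFAULT object is only allocated here, on the first lookup
 * (i.e. the first fault) against the entry. System maps are left
 * alone.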
3008 */ 3009 if (entry->object.vm_object == NULL && 3010 !map->system_map) { 3011 if (vm_map_lock_upgrade(map)) 3012 goto RetryLookup; 3013 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, 3014 atop(entry->end - entry->start)); 3015 entry->offset = 0; 3016 vm_map_lock_downgrade(map); 3017 } 3018 3019 /* 3020 * Return the object/offset from this entry. If the entry was 3021 * copy-on-write or empty, it has been fixed up. 3022 */ 3023 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3024 *object = entry->object.vm_object; 3025 3026 /* 3027 * Return whether this is the only map sharing this data. 3028 */ 3029 *out_prot = prot; 3030 return (KERN_SUCCESS); 3031 3032 #undef RETURN 3033 } 3034 3035 /* 3036 * vm_map_lookup_done: 3037 * 3038 * Releases locks acquired by a vm_map_lookup 3039 * (according to the handle returned by that lookup). 3040 */ 3041 void 3042 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 3043 { 3044 /* 3045 * Unlock the main-level map 3046 */ 3047 vm_map_unlock_read(map); 3048 } 3049 3050 #include "opt_ddb.h" 3051 #ifdef DDB 3052 #include <sys/kernel.h> 3053 3054 #include <ddb/ddb.h> 3055 3056 /* 3057 * vm_map_print: [ debug ] 3058 */ 3059 DB_SHOW_COMMAND(map, vm_map_print) 3060 { 3061 static int nlines; 3062 /* XXX convert args. */ 3063 vm_map_t map = (vm_map_t)addr; 3064 boolean_t full = have_addr; 3065 3066 vm_map_entry_t entry; 3067 3068 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3069 (void *)map, 3070 (void *)map->pmap, map->nentries, map->timestamp); 3071 nlines++; 3072 3073 if (!full && db_indent) 3074 return; 3075 3076 db_indent += 2; 3077 for (entry = map->header.next; entry != &map->header; 3078 entry = entry->next) { 3079 db_iprintf("map entry %p: start=%p, end=%p\n", 3080 (void *)entry, (void *)entry->start, (void *)entry->end); 3081 nlines++; 3082 { 3083 static char *inheritance_name[4] = 3084 {"share", "copy", "none", "donate_copy"}; 3085 3086 db_iprintf(" prot=%x/%x/%s", 3087 entry->protection, 3088 entry->max_protection, 3089 inheritance_name[(int)(unsigned char)entry->inheritance]); 3090 if (entry->wired_count != 0) 3091 db_printf(", wired"); 3092 } 3093 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 3094 db_printf(", share=%p, offset=0x%jx\n", 3095 (void *)entry->object.sub_map, 3096 (uintmax_t)entry->offset); 3097 nlines++; 3098 if ((entry->prev == &map->header) || 3099 (entry->prev->object.sub_map != 3100 entry->object.sub_map)) { 3101 db_indent += 2; 3102 vm_map_print((db_expr_t)(intptr_t) 3103 entry->object.sub_map, 3104 full, 0, (char *)0); 3105 db_indent -= 2; 3106 } 3107 } else { 3108 db_printf(", object=%p, offset=0x%jx", 3109 (void *)entry->object.vm_object, 3110 (uintmax_t)entry->offset); 3111 if (entry->eflags & MAP_ENTRY_COW) 3112 db_printf(", copy (%s)", 3113 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? 
"needed" : "done"); 3114 db_printf("\n"); 3115 nlines++; 3116 3117 if ((entry->prev == &map->header) || 3118 (entry->prev->object.vm_object != 3119 entry->object.vm_object)) { 3120 db_indent += 2; 3121 vm_object_print((db_expr_t)(intptr_t) 3122 entry->object.vm_object, 3123 full, 0, (char *)0); 3124 nlines += 4; 3125 db_indent -= 2; 3126 } 3127 } 3128 } 3129 db_indent -= 2; 3130 if (db_indent == 0) 3131 nlines = 0; 3132 } 3133 3134 3135 DB_SHOW_COMMAND(procvm, procvm) 3136 { 3137 struct proc *p; 3138 3139 if (have_addr) { 3140 p = (struct proc *) addr; 3141 } else { 3142 p = curproc; 3143 } 3144 3145 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3146 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3147 (void *)vmspace_pmap(p->p_vmspace)); 3148 3149 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3150 } 3151 3152 #endif /* DDB */ 3153