/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one map to
 * another, and then marking both regions as copy-on-write.
 */

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before
 * any other vm_map routines.
 *
 * Map and entry structures are allocated from the general
 * purpose memory pool with some exceptions:
 *
 *  - The kernel map and kmem submap are allocated statically.
 *  - Kernel map entries are allocated out of a static pool.
 *
 * These restrictions are necessary since malloc() uses the
 * maps and requires map entries.
 */

static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
static struct vm_map map_init[MAX_KMAP];

void
vm_map_startup(void)
{
    mapzone = &mapzone_store;
    zbootinit(mapzone, "MAP", sizeof (struct vm_map),
        map_init, MAX_KMAP);
    kmapentzone = &kmapentzone_store;
    zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
        kmap_entry_init, MAX_KMAPENT);
    mapentzone = &mapentzone_store;
    zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
        map_entry_init, MAX_MAPENT);
}
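/*
 * Illustrative sketch (not part of the original code): the zones above are
 * brought up in two phases.  vm_map_startup() seeds them from static
 * storage via zbootinit() before the VM system can allocate anything;
 * later, vm_init2() re-initializes them with backing objects via zinitna()
 * once page allocation works.  Roughly:
 *
 *	vm_map_startup();	// boot: static pools only
 *	...			// pmap and VM bring-up
 *	vm_init2();		// switch zones to dynamic backing
 */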
/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max)
    vm_offset_t min, max;
{
    struct vmspace *vm;

    GIANT_REQUIRED;
    vm = zalloc(vmspace_zone);
    CTR1(KTR_VM, "vmspace_alloc: %p", vm);
    vm_map_init(&vm->vm_map, min, max);
    pmap_pinit(vmspace_pmap(vm));
    vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
    vm->vm_refcnt = 1;
    vm->vm_shm = NULL;
    return (vm);
}

void
vm_init2(void)
{
    zinitna(kmapentzone, &kmapentobj,
        NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
    zinitna(mapentzone, &mapentobj,
        NULL, 0, 0, 0, 1);
    zinitna(mapzone, &mapobj,
        NULL, 0, 0, 0, 1);
    vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
    pmap_init2();
    vm_object_init2();
}

void
vmspace_free(struct vmspace *vm)
{
    GIANT_REQUIRED;

    if (vm->vm_refcnt == 0)
        panic("vmspace_free: attempt to free already freed vmspace");

    if (--vm->vm_refcnt == 0) {
        CTR1(KTR_VM, "vmspace_free: %p", vm);
        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        vm_map_lock(&vm->vm_map);
        (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);
        vm_map_unlock(&vm->vm_map);

        pmap_release(vmspace_pmap(vm));
        vm_map_destroy(&vm->vm_map);
        zfree(vmspace_zone, vm);
    }
}
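/*
 * Illustrative sketch (not part of the original code): a vmspace is
 * reference counted.  A typical lifecycle, under Giant:
 *
 *	vm = vmspace_alloc(min, max);	// vm_refcnt == 1
 *	vm->vm_refcnt++;		// e.g. when the space is shared
 *	vmspace_free(vm);		// drops one reference
 *	vmspace_free(vm);		// last ref: tears down map and pmap
 */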
/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	Swap usage is determined by taking the proportional swap used by
 *	VM objects backing the VM map.  To make up for fractional losses,
 *	if the VM object has any swap use at all the associated map entries
 *	count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
    vm_map_t map = &vmspace->vm_map;
    vm_map_entry_t cur;
    int count = 0;

    for (cur = map->header.next; cur != &map->header; cur = cur->next) {
        vm_object_t object;

        if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
            (object = cur->object.vm_object) != NULL &&
            object->type == OBJT_SWAP
        ) {
            int n = (cur->end - cur->start) / PAGE_SIZE;

            if (object->un_pager.swp.swp_bcount) {
                count += object->un_pager.swp.swp_bcount *
                    SWAP_META_PAGES * n / object->size + 1;
            }
        }
    }
    return(count);
}

u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
    return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
    entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
        (behavior & MAP_ENTRY_BEHAV_MASK);
}

void
vm_map_lock(vm_map_t map)
{
    vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
    if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
        panic("vm_map_lock: failed to get lock");
    map->timestamp++;
}

void
vm_map_unlock(vm_map_t map)
{
    vm_map_printf("locking map LK_RELEASE: %p\n", map);
    lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}

void
vm_map_lock_read(vm_map_t map)
{
    vm_map_printf("locking map LK_SHARED: %p\n", map);
    lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
}

void
vm_map_unlock_read(vm_map_t map)
{
    vm_map_printf("locking map LK_RELEASE: %p\n", map);
    lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}

static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
    int error;

    vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
    error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
    if (error == 0)
        map->timestamp++;
    return error;
}

int
vm_map_lock_upgrade(vm_map_t map)
{
    return(_vm_map_lock_upgrade(map, curthread));
}

void
vm_map_lock_downgrade(vm_map_t map)
{
    vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
    lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
}

void
vm_map_set_recursive(vm_map_t map)
{
    mtx_lock((map)->lock.lk_interlock);
    map->lock.lk_flags |= LK_CANRECURSE;
    mtx_unlock((map)->lock.lk_interlock);
}

void
vm_map_clear_recursive(vm_map_t map)
{
    mtx_lock((map)->lock.lk_interlock);
    map->lock.lk_flags &= ~LK_CANRECURSE;
    mtx_unlock((map)->lock.lk_interlock);
}

vm_offset_t
vm_map_min(vm_map_t map)
{
    return(map->min_offset);
}

vm_offset_t
vm_map_max(vm_map_t map)
{
    return(map->max_offset);
}

struct pmap *
vm_map_pmap(vm_map_t map)
{
    return(map->pmap);
}

struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
    return &vmspace->vm_pmap;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
    return pmap_resident_count(vmspace_pmap(vmspace));
}
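/*
 * Illustrative sketch (not part of the original code): a failed upgrade in
 * the lock wrappers above means the shared lock was lost, so callers must
 * re-acquire and re-validate (vm_map_user_pageable() below uses exactly
 * this pattern):
 *
 *	if (vm_map_lock_upgrade(map)) {
 *		vm_map_lock(map);
 *		// state may have changed: look the entry up again
 *	}
 */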
/*
 * vm_map_create:
 *
 * Creates and returns a new empty VM map with
 * the given physical map structure, and having
 * the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
    vm_map_t result;

    GIANT_REQUIRED;

    result = zalloc(mapzone);
    CTR1(KTR_VM, "vm_map_create: %p", result);
    vm_map_init(result, min, max);
    result->pmap = pmap;
    return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
    GIANT_REQUIRED;

    map->header.next = map->header.prev = &map->header;
    map->nentries = 0;
    map->size = 0;
    map->system_map = 0;
    map->infork = 0;
    map->min_offset = min;
    map->max_offset = max;
    map->first_free = &map->header;
    map->hint = &map->header;
    map->timestamp = 0;
    lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

void
vm_map_destroy(map)
    struct vm_map *map;
{
    GIANT_REQUIRED;
    lockdestroy(&map->lock);
}

/*
 * vm_map_entry_dispose:  [ internal use only ]
 *
 * Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
    zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}

/*
 * vm_map_entry_create:  [ internal use only ]
 *
 * Allocates a VM map entry for insertion.
 * No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
    vm_map_entry_t new_entry;

    new_entry = zalloc((map->system_map || !mapentzone) ?
        kmapentzone : mapentzone);
    if (new_entry == NULL)
        panic("vm_map_entry_create: kernel resources exhausted");
    return(new_entry);
}

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
    vm_map_entry_t after_where,
    vm_map_entry_t entry)
{

    CTR4(KTR_VM,
        "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
        map->nentries, entry, after_where);
    map->nentries++;
    entry->prev = after_where;
    entry->next = after_where->next;
    entry->next->prev = entry;
    after_where->next = entry;
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
    vm_map_entry_t entry)
{
    vm_map_entry_t prev = entry->prev;
    vm_map_entry_t next = entry->next;

    next->prev = prev;
    prev->next = next;
    map->nentries--;
    CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
        map->nentries, entry);
}

/*
 * SAVE_HINT:
 *
 * Saves the specified entry as the hint for
 * future lookups.
 */
#define SAVE_HINT(map,value) \
        (map)->hint = (value);
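/*
 * Illustrative sketch (not part of the original code): the hint caches the
 * last entry found, so repeated lookups in the same neighborhood stay
 * cheap.  Typical use of vm_map_lookup_entry(), defined below:
 *
 *	vm_map_entry_t entry;
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr lies within [entry->start, entry->end)
 *	} else {
 *		// entry is the closest entry preceding addr
 *	}
 */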
/*
 * vm_map_lookup_entry:  [ internal use only ]
 *
 * Finds the map entry containing (or
 * immediately preceding) the specified address
 * in the given map; the entry is returned
 * in the "entry" parameter.  The boolean
 * result indicates whether the address is
 * actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
    vm_map_t map,
    vm_offset_t address,
    vm_map_entry_t *entry)	/* OUT */
{
    vm_map_entry_t cur;
    vm_map_entry_t last;

    GIANT_REQUIRED;
    /*
     * Start looking either from the head of the list, or from the hint.
     */

    cur = map->hint;

    if (cur == &map->header)
        cur = cur->next;

    if (address >= cur->start) {
        /*
         * Go from hint to end of list.
         *
         * But first, make a quick check to see if we are already looking
         * at the entry we want (which is usually the case). Note also
         * that we don't need to save the hint here... it is the same
         * hint (unless we are at the header, in which case the hint
         * didn't buy us anything anyway).
         */
        last = &map->header;
        if ((cur != last) && (cur->end > address)) {
            *entry = cur;
            return (TRUE);
        }
    } else {
        /*
         * Go from start to hint, *inclusively*
         */
        last = cur->next;
        cur = map->header.next;
    }

    /*
     * Search linearly
     */

    while (cur != last) {
        if (cur->end > address) {
            if (address >= cur->start) {
                /*
                 * Save this lookup for future hints, and
                 * return
                 */

                *entry = cur;
                SAVE_HINT(map, cur);
                return (TRUE);
            }
            break;
        }
        cur = cur->next;
    }
    *entry = cur->prev;
    SAVE_HINT(map, *entry);
    return (FALSE);
}

/*
 * vm_map_insert:
 *
 * Inserts the given whole VM object into the target
 * map at the specified address range.  The object's
 * size should match that of the address range.
 *
 * Requires that the map be locked, and leaves it so.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
    int cow)
{
    vm_map_entry_t new_entry;
    vm_map_entry_t prev_entry;
    vm_map_entry_t temp_entry;
    vm_eflags_t protoeflags;

    GIANT_REQUIRED;

    /*
     * Check that the start and end points are not bogus.
     */

    if ((start < map->min_offset) || (end > map->max_offset) ||
        (start >= end))
        return (KERN_INVALID_ADDRESS);

    /*
     * Find the entry prior to the proposed starting address; if it's part
     * of an existing entry, this range is bogus.
     */

    if (vm_map_lookup_entry(map, start, &temp_entry))
        return (KERN_NO_SPACE);

    prev_entry = temp_entry;

    /*
     * Assert that the next entry doesn't overlap the end point.
     */

    if ((prev_entry->next != &map->header) &&
        (prev_entry->next->start < end))
        return (KERN_NO_SPACE);

    protoeflags = 0;

    if (cow & MAP_COPY_ON_WRITE)
        protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

    if (cow & MAP_NOFAULT) {
        protoeflags |= MAP_ENTRY_NOFAULT;

        KASSERT(object == NULL,
            ("vm_map_insert: paradoxical MAP_NOFAULT request"));
    }
    if (cow & MAP_DISABLE_SYNCER)
        protoeflags |= MAP_ENTRY_NOSYNC;
    if (cow & MAP_DISABLE_COREDUMP)
        protoeflags |= MAP_ENTRY_NOCOREDUMP;

    if (object) {
        /*
         * When object is non-NULL, it could be shared with another
         * process.  We have to set or clear OBJ_ONEMAPPING
         * appropriately.
         */
        if ((object->ref_count > 1) || (object->shadow_count != 0)) {
            vm_object_clear_flag(object, OBJ_ONEMAPPING);
        }
    }
    else if ((prev_entry != &map->header) &&
        (prev_entry->eflags == protoeflags) &&
        (prev_entry->end == start) &&
        (prev_entry->wired_count == 0) &&
        ((prev_entry->object.vm_object == NULL) ||
         vm_object_coalesce(prev_entry->object.vm_object,
             OFF_TO_IDX(prev_entry->offset),
             (vm_size_t)(prev_entry->end - prev_entry->start),
             (vm_size_t)(end - prev_entry->end)))) {
        /*
         * We were able to extend the object.  Determine if we
         * can extend the previous map entry to include the
         * new range as well.
         */
        if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
            (prev_entry->protection == prot) &&
            (prev_entry->max_protection == max)) {
            map->size += (end - prev_entry->end);
            prev_entry->end = end;
            vm_map_simplify_entry(map, prev_entry);
            return (KERN_SUCCESS);
        }

        /*
         * If we can extend the object but cannot extend the
         * map entry, we have to create a new map entry.  We
         * must bump the ref count on the extended object to
         * account for it.  object may be NULL.
         */
        object = prev_entry->object.vm_object;
        offset = prev_entry->offset +
            (prev_entry->end - prev_entry->start);
        vm_object_reference(object);
    }

    /*
     * NOTE: if conditionals fail, object can be NULL here.  This occurs
     * in things like the buffer map where we manage kva but do not manage
     * backing objects.
     */

    /*
     * Create a new entry
     */

    new_entry = vm_map_entry_create(map);
    new_entry->start = start;
    new_entry->end = end;

    new_entry->eflags = protoeflags;
    new_entry->object.vm_object = object;
    new_entry->offset = offset;
    new_entry->avail_ssize = 0;

    new_entry->inheritance = VM_INHERIT_DEFAULT;
    new_entry->protection = prot;
    new_entry->max_protection = max;
    new_entry->wired_count = 0;

    /*
     * Insert the new entry into the list
     */

    vm_map_entry_link(map, prev_entry, new_entry);
    map->size += new_entry->end - new_entry->start;

    /*
     * Update the free space hint
     */
    if ((map->first_free == prev_entry) &&
        (prev_entry->end >= new_entry->start)) {
        map->first_free = new_entry;
    }

#if 0
    /*
     * Temporarily removed to avoid MAP_STACK panic, due to
     * MAP_STACK being a huge hack.  Will be added back in
     * when MAP_STACK (and the user stack mapping) is fixed.
     */
    /*
     * It may be possible to simplify the entry
     */
    vm_map_simplify_entry(map, new_entry);
#endif

    if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
        pmap_object_init_pt(map->pmap, start,
            object, OFF_TO_IDX(offset), end - start,
            cow & MAP_PREFAULT_PARTIAL);
    }

    return (KERN_SUCCESS);
}
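/*
 * Illustrative sketch (not part of the original code): per the contract
 * above, a caller passing a non-NULL object takes the reference first and
 * drops it again if the insert fails:
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, object, offset, start, end, prot, max, cow);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */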
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(
    vm_map_t map,
    vm_offset_t start,
    vm_size_t length,
    vm_offset_t *addr)
{
    vm_map_entry_t entry, next;
    vm_offset_t end;

    GIANT_REQUIRED;
    if (start < map->min_offset)
        start = map->min_offset;
    if (start > map->max_offset)
        return (1);

    /*
     * Look for the first possible address; if there's already something
     * at this address, we have to start after it.
     */
    if (start == map->min_offset) {
        if ((entry = map->first_free) != &map->header)
            start = entry->end;
    } else {
        vm_map_entry_t tmp;

        if (vm_map_lookup_entry(map, start, &tmp))
            start = tmp->end;
        entry = tmp;
    }

    /*
     * Look through the rest of the map, trying to fit a new region in the
     * gap between existing regions, or after the very last region.
     */
    for (;; start = (entry = next)->end) {
        /*
         * Find the end of the proposed new region.  Be sure we didn't
         * go beyond the end of the map, or wrap around the address;
         * if so, we lose.  Otherwise, if this is the last entry, or
         * if the proposed new region fits before the next entry, we
         * win.
         */
        end = start + length;
        if (end > map->max_offset || end < start)
            return (1);
        next = entry->next;
        if (next == &map->header || next->start >= end)
            break;
    }
    SAVE_HINT(map, entry);
    *addr = start;
    if (map == kernel_map) {
        vm_offset_t ksize;
        if ((ksize = round_page(start + length)) > kernel_vm_end) {
            pmap_growkernel(ksize);
        }
    }
    return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address
 * map with the given length.  The search is defined to be
 * first-fit from the specified address; the region found is
 * returned in the same parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, boolean_t find_space, vm_prot_t prot,
    vm_prot_t max, int cow)
{
    vm_offset_t start;
    int result, s = 0;

    GIANT_REQUIRED;

    start = *addr;

    if (map == kmem_map)
        s = splvm();

    vm_map_lock(map);
    if (find_space) {
        if (vm_map_findspace(map, start, length, addr)) {
            vm_map_unlock(map);
            if (map == kmem_map)
                splx(s);
            return (KERN_NO_SPACE);
        }
        start = *addr;
    }
    result = vm_map_insert(map, object, offset,
        start, start + length, prot, max, cow);
    vm_map_unlock(map);

    if (map == kmem_map)
        splx(s);

    return (result);
}
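/*
 * Illustrative sketch (not part of the original code): with find_space set,
 * vm_map_find() is the usual "allocate anywhere at or above a floor" call;
 * addr is both the search floor going in and the chosen address coming out:
 *
 *	vm_offset_t addr = floor;
 *	rv = vm_map_find(map, NULL, 0, &addr, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 */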
/*
 * vm_map_simplify_entry:
 *
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * The map must be locked.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
    vm_map_entry_t next, prev;
    vm_size_t prevsize, esize;

    GIANT_REQUIRED;

    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
        return;

    prev = entry->prev;
    if (prev != &map->header) {
        prevsize = prev->end - prev->start;
        if ( (prev->end == entry->start) &&
             (prev->object.vm_object == entry->object.vm_object) &&
             (!prev->object.vm_object ||
              (prev->offset + prevsize == entry->offset)) &&
             (prev->eflags == entry->eflags) &&
             (prev->protection == entry->protection) &&
             (prev->max_protection == entry->max_protection) &&
             (prev->inheritance == entry->inheritance) &&
             (prev->wired_count == entry->wired_count)) {
            if (map->first_free == prev)
                map->first_free = entry;
            if (map->hint == prev)
                map->hint = entry;
            vm_map_entry_unlink(map, prev);
            entry->start = prev->start;
            entry->offset = prev->offset;
            if (prev->object.vm_object)
                vm_object_deallocate(prev->object.vm_object);
            vm_map_entry_dispose(map, prev);
        }
    }

    next = entry->next;
    if (next != &map->header) {
        esize = entry->end - entry->start;
        if ((entry->end == next->start) &&
            (next->object.vm_object == entry->object.vm_object) &&
            (!entry->object.vm_object ||
             (entry->offset + esize == next->offset)) &&
            (next->eflags == entry->eflags) &&
            (next->protection == entry->protection) &&
            (next->max_protection == entry->max_protection) &&
            (next->inheritance == entry->inheritance) &&
            (next->wired_count == entry->wired_count)) {
            if (map->first_free == next)
                map->first_free = entry;
            if (map->hint == next)
                map->hint = entry;
            vm_map_entry_unlink(map, next);
            entry->end = next->end;
            if (next->object.vm_object)
                vm_object_deallocate(next->object.vm_object);
            vm_map_entry_dispose(map, next);
        }
    }
}

/*
 * vm_map_clip_start:  [ internal use only ]
 *
 * Asserts that the given entry begins at or after
 * the specified address; if necessary,
 * it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
    if (startaddr > entry->start) \
        _vm_map_clip_start(map, entry, startaddr); \
}
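/*
 * Illustrative sketch (not part of the original code): clipping splits one
 * entry at an address so later operations work on an exact boundary.  For
 * an entry covering [A, B) with A < addr < B:
 *
 *	before:  [A ................ B)
 *	after:   [A ... addr)[addr .. B)
 *
 * The original entry keeps the second half; the offsets are adjusted so
 * both halves still map the same object pages.
 */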
/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
    vm_map_entry_t new_entry;

    /*
     * Split off the front portion -- note that we must insert the new
     * entry BEFORE this one, so that this entry has the specified
     * starting address.
     */

    vm_map_simplify_entry(map, entry);

    /*
     * If there is no object backing this entry, we might as well create
     * one now.  If we defer it, an object can get created after the map
     * is clipped, and individual objects will be created for the split-up
     * map.  This is a bit of a hack, but is also about the best place to
     * put this improvement.
     */

    if (entry->object.vm_object == NULL && !map->system_map) {
        vm_object_t object;
        object = vm_object_allocate(OBJT_DEFAULT,
            atop(entry->end - entry->start));
        entry->object.vm_object = object;
        entry->offset = 0;
    }

    new_entry = vm_map_entry_create(map);
    *new_entry = *entry;

    new_entry->end = start;
    entry->offset += (start - entry->start);
    entry->start = start;

    vm_map_entry_link(map, entry->prev, new_entry);

    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
        vm_object_reference(new_entry->object.vm_object);
    }
}

/*
 * vm_map_clip_end:  [ internal use only ]
 *
 * Asserts that the given entry ends at or before
 * the specified address; if necessary,
 * it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
    if (endaddr < entry->end) \
        _vm_map_clip_end(map, entry, endaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
    vm_map_entry_t new_entry;

    /*
     * If there is no object backing this entry, we might as well create
     * one now.  If we defer it, an object can get created after the map
     * is clipped, and individual objects will be created for the split-up
     * map.  This is a bit of a hack, but is also about the best place to
     * put this improvement.
     */

    if (entry->object.vm_object == NULL && !map->system_map) {
        vm_object_t object;
        object = vm_object_allocate(OBJT_DEFAULT,
            atop(entry->end - entry->start));
        entry->object.vm_object = object;
        entry->offset = 0;
    }

    /*
     * Create a new entry and insert it AFTER the specified entry
     */

    new_entry = vm_map_entry_create(map);
    *new_entry = *entry;

    new_entry->start = entry->end = end;
    new_entry->offset += (end - entry->start);

    vm_map_entry_link(map, entry, new_entry);

    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
        vm_object_reference(new_entry->object.vm_object);
    }
}

/*
 * VM_MAP_RANGE_CHECK:  [ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
    if (start < vm_map_min(map)) \
        start = vm_map_min(map); \
    if (end > vm_map_max(map)) \
        end = vm_map_max(map); \
    if (start > end) \
        start = end; \
}

/*
 * vm_map_submap:  [ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find,
 * and no other operations may have been performed on this
 * range prior to calling vm_map_submap.
 *
 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
 *	vm_fault
 * [Don't try vm_map_copy!]
 *
 * To remove a submapping, one must first remove the
 * range from the superior map, and then destroy the
 * submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
    vm_map_t map,
    vm_offset_t start,
    vm_offset_t end,
    vm_map_t submap)
{
    vm_map_entry_t entry;
    int result = KERN_INVALID_ARGUMENT;

    GIANT_REQUIRED;

    vm_map_lock(map);

    VM_MAP_RANGE_CHECK(map, start, end);

    if (vm_map_lookup_entry(map, start, &entry)) {
        vm_map_clip_start(map, entry, start);
    } else
        entry = entry->next;

    vm_map_clip_end(map, entry, end);

    if ((entry->start == start) && (entry->end == end) &&
        ((entry->eflags & MAP_ENTRY_COW) == 0) &&
        (entry->object.vm_object == NULL)) {
        entry->object.sub_map = submap;
        entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
        result = KERN_SUCCESS;
    }
    vm_map_unlock(map);

    return (result);
}

/*
 * vm_map_protect:
 *
 * Sets the protection of the specified address
 * region in the target map.  If "set_max" is
 * specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t new_prot, boolean_t set_max)
{
    vm_map_entry_t current;
    vm_map_entry_t entry;

    GIANT_REQUIRED;
    vm_map_lock(map);

    VM_MAP_RANGE_CHECK(map, start, end);

    if (vm_map_lookup_entry(map, start, &entry)) {
        vm_map_clip_start(map, entry, start);
    } else {
        entry = entry->next;
    }

    /*
     * Make a first pass to check for protection violations.
     */

    current = entry;
    while ((current != &map->header) && (current->start < end)) {
        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
            vm_map_unlock(map);
            return (KERN_INVALID_ARGUMENT);
        }
        if ((new_prot & current->max_protection) != new_prot) {
            vm_map_unlock(map);
            return (KERN_PROTECTION_FAILURE);
        }
        current = current->next;
    }

    /*
     * Go back and fix up protections. [Note that clipping is not
     * necessary the second time.]
     */

    current = entry;

    while ((current != &map->header) && (current->start < end)) {
        vm_prot_t old_prot;

        vm_map_clip_end(map, current, end);

        old_prot = current->protection;
        if (set_max)
            current->protection =
                (current->max_protection = new_prot) &
                old_prot;
        else
            current->protection = new_prot;

        /*
         * Update physical map if necessary. Worry about copy-on-write
         * here -- CHECK THIS XXX
         */

        if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                            VM_PROT_ALL)

            pmap_protect(map->pmap, current->start,
                current->end,
                current->protection & MASK(current));
#undef MASK
        }

        vm_map_simplify_entry(map, current);

        current = current->next;
    }

    vm_map_unlock(map);
    return (KERN_SUCCESS);
}
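/*
 * Illustrative sketch (not part of the original code): mprotect()-style
 * callers change only the current protection and leave max_protection
 * alone, so the request must stay within each entry's maximum:
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *	// KERN_PROTECTION_FAILURE if any entry's max_protection
 *	// does not include VM_PROT_READ
 */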
/*
 * vm_map_madvise:
 *
 * This routine traverses a process's map handling the madvise
 * system call.  Advisories are classified as either those affecting
 * the vm_map_entry structure, or those affecting the underlying
 * objects.
 */
int
vm_map_madvise(
    vm_map_t map,
    vm_offset_t start,
    vm_offset_t end,
    int behav)
{
    vm_map_entry_t current, entry;
    int modify_map = 0;

    GIANT_REQUIRED;

    /*
     * Some madvise calls directly modify the vm_map_entry, in which case
     * we need to use an exclusive lock on the map and we need to perform
     * various clipping operations.  Otherwise we only need a read-lock
     * on the map.
     */

    switch(behav) {
    case MADV_NORMAL:
    case MADV_SEQUENTIAL:
    case MADV_RANDOM:
    case MADV_NOSYNC:
    case MADV_AUTOSYNC:
    case MADV_NOCORE:
    case MADV_CORE:
        modify_map = 1;
        vm_map_lock(map);
        break;
    case MADV_WILLNEED:
    case MADV_DONTNEED:
    case MADV_FREE:
        vm_map_lock_read(map);
        break;
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    /*
     * Locate starting entry and clip if necessary.
     */

    VM_MAP_RANGE_CHECK(map, start, end);

    if (vm_map_lookup_entry(map, start, &entry)) {
        if (modify_map)
            vm_map_clip_start(map, entry, start);
    } else {
        entry = entry->next;
    }

    if (modify_map) {
        /*
         * madvise behaviors that are implemented in the vm_map_entry.
         *
         * We clip the vm_map_entry so that behavioral changes are
         * limited to the specified address range.
         */
        for (current = entry;
             (current != &map->header) && (current->start < end);
             current = current->next
        ) {
            if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
                continue;

            vm_map_clip_end(map, current, end);

            switch (behav) {
            case MADV_NORMAL:
                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
                break;
            case MADV_SEQUENTIAL:
                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
                break;
            case MADV_RANDOM:
                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
                break;
            case MADV_NOSYNC:
                current->eflags |= MAP_ENTRY_NOSYNC;
                break;
            case MADV_AUTOSYNC:
                current->eflags &= ~MAP_ENTRY_NOSYNC;
                break;
            case MADV_NOCORE:
                current->eflags |= MAP_ENTRY_NOCOREDUMP;
                break;
            case MADV_CORE:
                current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
                break;
            default:
                break;
            }
            vm_map_simplify_entry(map, current);
        }
        vm_map_unlock(map);
    } else {
        vm_pindex_t pindex;
        int count;

        /*
         * madvise behaviors that are implemented in the underlying
         * vm_object.
         *
         * Since we don't clip the vm_map_entry, we have to clip
         * the vm_object pindex and count.
         */
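        /*
         * Illustrative sketch (not part of the original code): for an
         * entry mapping [0x2000, 0x6000) at object offset 0 and an
         * madvise range [0x3000, 0x5000), with 4K pages:
         *
         *	pindex = 0 + atop(0x3000 - 0x2000) = 1
         *	count  = atop(0x6000 - 0x2000) - atop(0x3000 - 0x2000)
         *	         - atop(0x6000 - 0x5000) = 4 - 1 - 1 = 2
         *
         * i.e. only object pages 1 and 2 receive the advice.
         */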
        for (current = entry;
             (current != &map->header) && (current->start < end);
             current = current->next
        ) {
            vm_offset_t useStart;

            if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
                continue;

            pindex = OFF_TO_IDX(current->offset);
            count = atop(current->end - current->start);
            useStart = current->start;

            if (current->start < start) {
                pindex += atop(start - current->start);
                count -= atop(start - current->start);
                useStart = start;
            }
            if (current->end > end)
                count -= atop(current->end - end);

            if (count <= 0)
                continue;

            vm_object_madvise(current->object.vm_object,
                pindex, count, behav);
            if (behav == MADV_WILLNEED) {
                pmap_object_init_pt(
                    map->pmap,
                    useStart,
                    current->object.vm_object,
                    pindex,
                    (count << PAGE_SHIFT),
                    0
                );
            }
        }
        vm_map_unlock_read(map);
    }
    return(0);
}


/*
 * vm_map_inherit:
 *
 * Sets the inheritance of the specified address
 * range in the target map.  Inheritance
 * affects how the map will be shared with
 * child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_inherit_t new_inheritance)
{
    vm_map_entry_t entry;
    vm_map_entry_t temp_entry;

    GIANT_REQUIRED;

    switch (new_inheritance) {
    case VM_INHERIT_NONE:
    case VM_INHERIT_COPY:
    case VM_INHERIT_SHARE:
        break;
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    vm_map_lock(map);

    VM_MAP_RANGE_CHECK(map, start, end);

    if (vm_map_lookup_entry(map, start, &temp_entry)) {
        entry = temp_entry;
        vm_map_clip_start(map, entry, start);
    } else
        entry = temp_entry->next;

    while ((entry != &map->header) && (entry->start < end)) {
        vm_map_clip_end(map, entry, end);

        entry->inheritance = new_inheritance;

        vm_map_simplify_entry(map, entry);

        entry = entry->next;
    }

    vm_map_unlock(map);
    return (KERN_SUCCESS);
}

/*
 * Implement the semantics of mlock
 */
int
vm_map_user_pageable(
    vm_map_t map,
    vm_offset_t start,
    vm_offset_t end,
    boolean_t new_pageable)
{
    vm_map_entry_t entry;
    vm_map_entry_t start_entry;
    vm_offset_t estart;
    int rv;

    vm_map_lock(map);
    VM_MAP_RANGE_CHECK(map, start, end);

    if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
        vm_map_unlock(map);
        return (KERN_INVALID_ADDRESS);
    }

    if (new_pageable) {

        entry = start_entry;
        vm_map_clip_start(map, entry, start);

        /*
         * Now decrement the wiring count for each region.  If a region
         * becomes completely unwired, unwire its physical pages and
         * mappings.
         */
        while ((entry != &map->header) && (entry->start < end)) {
            if (entry->eflags & MAP_ENTRY_USER_WIRED) {
                vm_map_clip_end(map, entry, end);
                entry->eflags &= ~MAP_ENTRY_USER_WIRED;
                entry->wired_count--;
                if (entry->wired_count == 0)
                    vm_fault_unwire(map, entry->start, entry->end);
            }
            vm_map_simplify_entry(map,entry);
            entry = entry->next;
        }
    } else {

        entry = start_entry;

        while ((entry != &map->header) && (entry->start < end)) {

            if (entry->eflags & MAP_ENTRY_USER_WIRED) {
                entry = entry->next;
                continue;
            }

            if (entry->wired_count != 0) {
                entry->wired_count++;
                entry->eflags |= MAP_ENTRY_USER_WIRED;
                entry = entry->next;
                continue;
            }

            /* Here on entry being newly wired */

            if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
                if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

                    vm_object_shadow(&entry->object.vm_object,
                        &entry->offset,
                        atop(entry->end - entry->start));
                    entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

                } else if (entry->object.vm_object == NULL &&
                    !map->system_map) {

                    entry->object.vm_object =
                        vm_object_allocate(OBJT_DEFAULT,
                            atop(entry->end - entry->start));
                    entry->offset = (vm_offset_t) 0;

                }
            }

            vm_map_clip_start(map, entry, start);
            vm_map_clip_end(map, entry, end);

            entry->wired_count++;
            entry->eflags |= MAP_ENTRY_USER_WIRED;
            estart = entry->start;

            /* First we need to allow map modifications */
            vm_map_set_recursive(map);
            vm_map_lock_downgrade(map);
            map->timestamp++;

            rv = vm_fault_user_wire(map, entry->start, entry->end);
            if (rv) {

                entry->wired_count--;
                entry->eflags &= ~MAP_ENTRY_USER_WIRED;

                vm_map_clear_recursive(map);
                vm_map_unlock(map);

                (void) vm_map_user_pageable(map, start, entry->start, TRUE);
                return rv;
            }

            vm_map_clear_recursive(map);
            if (vm_map_lock_upgrade(map)) {
                vm_map_lock(map);
                if (vm_map_lookup_entry(map, estart, &entry)
                    == FALSE) {
                    vm_map_unlock(map);
                    (void) vm_map_user_pageable(map,
                        start,
                        estart,
                        TRUE);
                    return (KERN_INVALID_ADDRESS);
                }
            }
            vm_map_simplify_entry(map,entry);
        }
    }
    map->timestamp++;
    vm_map_unlock(map);
    return KERN_SUCCESS;
}

/*
 * vm_map_pageable:
 *
 * Sets the pageability of the specified address
 * range in the target map.  Regions specified
 * as not pageable require locked-down physical
 * memory and physical page maps.
 *
 * The map must not be locked, but a reference
 * must remain to the map throughout the call.
 */
int
vm_map_pageable(
    vm_map_t map,
    vm_offset_t start,
    vm_offset_t end,
    boolean_t new_pageable)
{
    vm_map_entry_t entry;
    vm_map_entry_t start_entry;
    vm_offset_t failed = 0;
    int rv;

    GIANT_REQUIRED;

    vm_map_lock(map);

    VM_MAP_RANGE_CHECK(map, start, end);

    /*
     * Only one pageability change may take place at one time, since
     * vm_fault assumes it will be called only once for each
     * wiring/unwiring.  Therefore, we have to make sure we're actually
     * changing the pageability for the entire region.  We do so before
     * making any changes.
     */
    if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
        vm_map_unlock(map);
        return (KERN_INVALID_ADDRESS);
    }
    entry = start_entry;

    /*
     * Actions are rather different for wiring and unwiring, so we have
     * two separate cases.
     */

    if (new_pageable) {

        vm_map_clip_start(map, entry, start);

        /*
         * Unwiring.  First ensure that the range to be unwired is
         * really wired down and that there are no holes.
         */
        while ((entry != &map->header) && (entry->start < end)) {

            if (entry->wired_count == 0 ||
                (entry->end < end &&
                 (entry->next == &map->header ||
                  entry->next->start > entry->end))) {
                vm_map_unlock(map);
                return (KERN_INVALID_ARGUMENT);
            }
            entry = entry->next;
        }

        /*
         * Now decrement the wiring count for each region.  If a region
         * becomes completely unwired, unwire its physical pages and
         * mappings.
         */
        entry = start_entry;
        while ((entry != &map->header) && (entry->start < end)) {
            vm_map_clip_end(map, entry, end);

            entry->wired_count--;
            if (entry->wired_count == 0)
                vm_fault_unwire(map, entry->start, entry->end);

            vm_map_simplify_entry(map, entry);

            entry = entry->next;
        }
    } else {
        /*
         * Wiring.  We must do this in two passes:
         *
         * 1. Holding the write lock, we create any shadow or zero-fill
         * objects that need to be created.  Then we clip each map
         * entry to the region to be wired and increment its wiring
         * count.  We create objects before clipping the map entries
         * to avoid object proliferation.
         *
         * 2. We downgrade to a read lock, and call vm_fault_wire to
         * fault in the pages for any newly wired area (wired_count is
         * 1).
         *
         * Downgrading to a read lock for vm_fault_wire avoids a possible
         * deadlock with another process that may have faulted on one
         * of the pages to be wired (it would mark the page busy,
         * blocking us, then in turn block on the map lock that we
         * hold).  Because of problems in the recursive lock package,
         * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
         * any actions that require the write lock must be done
         * beforehand.  Because we keep the read lock on the map, the
         * copy-on-write status of the entries we modify here cannot
         * change.
         */

        /*
         * Pass 1.
         */
        while ((entry != &map->header) && (entry->start < end)) {
            if (entry->wired_count == 0) {

                /*
                 * Perform actions of vm_map_lookup that need
                 * the write lock on the map: create a shadow
                 * object for a copy-on-write region, or an
                 * object for a zero-fill region.
                 *
                 * We don't have to do this for entries that
                 * point to sub maps, because we won't
                 * hold the lock on the sub map.
                 */
                if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                    int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
                    if (copyflag &&
                        ((entry->protection & VM_PROT_WRITE) != 0)) {

                        vm_object_shadow(&entry->object.vm_object,
                            &entry->offset,
                            atop(entry->end - entry->start));
                        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                    } else if (entry->object.vm_object == NULL &&
                        !map->system_map) {
                        entry->object.vm_object =
                            vm_object_allocate(OBJT_DEFAULT,
                                atop(entry->end - entry->start));
                        entry->offset = (vm_offset_t) 0;
                    }
                }
            }
            vm_map_clip_start(map, entry, start);
            vm_map_clip_end(map, entry, end);
            entry->wired_count++;

            /*
             * Check for holes
             */
            if (entry->end < end &&
                (entry->next == &map->header ||
                 entry->next->start > entry->end)) {
                /*
                 * Found one.  Object creation actions do not
                 * need to be undone, but the wired counts
                 * need to be restored.
                 */
                while (entry != &map->header && entry->end > start) {
                    entry->wired_count--;
                    entry = entry->prev;
                }
                vm_map_unlock(map);
                return (KERN_INVALID_ARGUMENT);
            }
            entry = entry->next;
        }

        /*
         * Pass 2.
         */

        /*
         * HACK HACK HACK HACK
         *
         * If we are wiring in the kernel map or a submap of it,
         * unlock the map to avoid deadlocks.  We trust that the
         * kernel is well-behaved, and therefore will not do
         * anything destructive to this region of the map while
         * we have it unlocked.  We cannot trust user processes
         * to do the same.
         *
         * HACK HACK HACK HACK
         */
        if (vm_map_pmap(map) == kernel_pmap) {
            vm_map_unlock(map);	/* trust me ... */
        } else {
            vm_map_lock_downgrade(map);
        }

        rv = 0;
        entry = start_entry;
        while (entry != &map->header && entry->start < end) {
            /*
             * If vm_fault_wire fails for any page we need to undo
             * what has been done.  We decrement the wiring count
             * for those pages which have not yet been wired (now)
             * and unwire those that have (later).
             *
             * XXX this violates the locking protocol on the map,
             * needs to be fixed.
             */
            if (rv)
                entry->wired_count--;
            else if (entry->wired_count == 1) {
                rv = vm_fault_wire(map, entry->start, entry->end);
                if (rv) {
                    failed = entry->start;
                    entry->wired_count--;
                }
            }
            entry = entry->next;
        }

        if (vm_map_pmap(map) == kernel_pmap) {
            vm_map_lock(map);
        }
        if (rv) {
            vm_map_unlock(map);
            (void) vm_map_pageable(map, start, failed, TRUE);
            return (rv);
        }
        vm_map_simplify_entry(map, start_entry);
    }

    vm_map_unlock(map);

    return (KERN_SUCCESS);
}
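/*
 * Illustrative sketch (not part of the original code): callers wire a range
 * by asking for it to become non-pageable, and unwire it the same way:
 *
 *	rv = vm_map_pageable(map, start, end, FALSE);	// wire down
 *	...
 *	rv = vm_map_pageable(map, start, end, TRUE);	// unwire
 */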
/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(
    vm_map_t map,
    vm_offset_t start,
    vm_offset_t end,
    boolean_t syncio,
    boolean_t invalidate)
{
    vm_map_entry_t current;
    vm_map_entry_t entry;
    vm_size_t size;
    vm_object_t object;
    vm_ooffset_t offset;

    GIANT_REQUIRED;

    vm_map_lock_read(map);
    VM_MAP_RANGE_CHECK(map, start, end);
    if (!vm_map_lookup_entry(map, start, &entry)) {
        vm_map_unlock_read(map);
        return (KERN_INVALID_ADDRESS);
    }
    /*
     * Make a first pass to check for holes.
     */
    for (current = entry; current->start < end; current = current->next) {
        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
            vm_map_unlock_read(map);
            return (KERN_INVALID_ARGUMENT);
        }
        if (end > current->end &&
            (current->next == &map->header ||
             current->end != current->next->start)) {
            vm_map_unlock_read(map);
            return (KERN_INVALID_ADDRESS);
        }
    }

    if (invalidate)
        pmap_remove(vm_map_pmap(map), start, end);
    /*
     * Make a second pass, cleaning/uncaching pages from the indicated
     * objects as we go.
     */
    for (current = entry; current->start < end; current = current->next) {
        offset = current->offset + (start - current->start);
        size = (end <= current->end ? end : current->end) - start;
        if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
            vm_map_t smap;
            vm_map_entry_t tentry;
            vm_size_t tsize;

            smap = current->object.sub_map;
            vm_map_lock_read(smap);
            (void) vm_map_lookup_entry(smap, offset, &tentry);
            tsize = tentry->end - offset;
            if (tsize < size)
                size = tsize;
            object = tentry->object.vm_object;
            offset = tentry->offset + (offset - tentry->start);
            vm_map_unlock_read(smap);
        } else {
            object = current->object.vm_object;
        }
        /*
         * Note that there is absolutely no sense in writing out
         * anonymous objects, so we track down the vnode object
         * to write out.
         * We invalidate (remove) all pages from the address space
         * anyway, for semantic correctness.
         */
        while (object->backing_object) {
            object = object->backing_object;
            offset += object->backing_object_offset;
            if (object->size < OFF_TO_IDX( offset + size))
                size = IDX_TO_OFF(object->size) - offset;
        }
        if (object && (object->type == OBJT_VNODE) &&
            (current->protection & VM_PROT_WRITE)) {
            /*
             * Flush pages if writing is allowed, invalidate them
             * if invalidation requested.  Pages undergoing I/O
             * will be ignored by vm_object_page_remove().
             *
             * We cannot lock the vnode and then wait for paging
             * to complete without deadlocking against vm_fault.
             * Instead we simply call vm_object_page_remove() and
             * allow it to block internally on a page-by-page
             * basis when it encounters pages undergoing async
             * I/O.
             */
            int flags;

            vm_object_reference(object);
            vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
            flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
            flags |= invalidate ? OBJPC_INVAL : 0;
            vm_object_page_clean(object,
                OFF_TO_IDX(offset),
                OFF_TO_IDX(offset + size + PAGE_MASK),
                flags);
            if (invalidate) {
                /*vm_object_pip_wait(object, "objmcl");*/
                vm_object_page_remove(object,
                    OFF_TO_IDX(offset),
                    OFF_TO_IDX(offset + size + PAGE_MASK),
                    FALSE);
            }
            VOP_UNLOCK(object->handle, 0, curthread);
            vm_object_deallocate(object);
        }
        start += size;
    }

    vm_map_unlock_read(map);
    return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:  [ internal use only ]
 *
 * Make the region specified by this entry pageable.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
    vm_fault_unwire(map, entry->start, entry->end);
    entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:  [ internal use only ]
 *
 * Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
    vm_map_entry_unlink(map, entry);
    map->size -= entry->end - entry->start;

    if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
        vm_object_deallocate(entry->object.vm_object);
    }

    vm_map_entry_dispose(map, entry);
}

/*
 * vm_map_delete:  [ internal use only ]
 *
 * Deallocates the given address range from the target
 * map.
 */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
    vm_object_t object;
    vm_map_entry_t entry;
    vm_map_entry_t first_entry;

    GIANT_REQUIRED;

    /*
     * Find the start of the region, and clip it
     */

    if (!vm_map_lookup_entry(map, start, &first_entry))
        entry = first_entry->next;
    else {
        entry = first_entry;
        vm_map_clip_start(map, entry, start);
        /*
         * Fix the lookup hint now, rather than each time through the
         * loop.
         */
        SAVE_HINT(map, entry->prev);
    }

    /*
     * Save the free space hint
     */

    if (entry == &map->header) {
        map->first_free = &map->header;
    } else if (map->first_free->start >= start) {
        map->first_free = entry->prev;
    }

    /*
     * Step through all entries in this region
     */

    while ((entry != &map->header) && (entry->start < end)) {
        vm_map_entry_t next;
        vm_offset_t s, e;
        vm_pindex_t offidxstart, offidxend, count;

        vm_map_clip_end(map, entry, end);

        s = entry->start;
        e = entry->end;
        next = entry->next;

        offidxstart = OFF_TO_IDX(entry->offset);
        count = OFF_TO_IDX(e - s);
        object = entry->object.vm_object;

        /*
         * Unwire before removing addresses from the pmap; otherwise,
         * unwiring will put the entries back in the pmap.
         */
        if (entry->wired_count != 0) {
            vm_map_entry_unwire(map, entry);
        }

        offidxend = offidxstart + count;

        if ((object == kernel_object) || (object == kmem_object)) {
            vm_object_page_remove(object, offidxstart, offidxend, FALSE);
        } else {
            pmap_remove(map->pmap, s, e);
            if (object != NULL &&
                object->ref_count != 1 &&
                (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
                (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
                vm_object_collapse(object);
                vm_object_page_remove(object, offidxstart, offidxend, FALSE);
                if (object->type == OBJT_SWAP) {
                    swap_pager_freespace(object, offidxstart, count);
                }
                if (offidxend >= object->size &&
                    offidxstart < object->size) {
                    object->size = offidxstart;
                }
            }
        }

        /*
         * Delete the entry (which may delete the object) only after
         * removing all pmap entries pointing to its pages.
         * (Otherwise, its page frames may be reallocated, and any
         * modify bits will be set in the wrong object!)
         */
        vm_map_entry_delete(map, entry);
        entry = next;
    }
    return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
    int result, s = 0;

    GIANT_REQUIRED;

    if (map == kmem_map)
        s = splvm();

    vm_map_lock(map);
    VM_MAP_RANGE_CHECK(map, start, end);
    result = vm_map_delete(map, start, end);
    vm_map_unlock(map);

    if (map == kmem_map)
        splx(s);

    return (result);
}

/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified
 * privilege on the entire address region given.
 * The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t protection)
{
    vm_map_entry_t entry;
    vm_map_entry_t tmp_entry;

    GIANT_REQUIRED;

    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        return (FALSE);
    }
    entry = tmp_entry;

    while (start < end) {
        if (entry == &map->header) {
            return (FALSE);
        }
        /*
         * No holes allowed!
         */

        if (start < entry->start) {
            return (FALSE);
        }
        /*
         * Check protection associated with entry.
         */

        if ((entry->protection & protection) != protection) {
            return (FALSE);
        }
        /* go to next entry */

        start = entry->end;
        entry = entry->next;
    }
    return (TRUE);
}
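/*
 * Illustrative sketch (not part of the original code): a typical caller
 * verifies an entire user range is readable before touching it:
 *
 *	if (!vm_map_check_protection(map, uaddr, uaddr + len,
 *	    VM_PROT_READ))
 *		return (EFAULT);
 */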

/*
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * having a negative impact on memory usage.
 */
static void
vm_map_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend, idx;
	vm_size_t size;
	vm_ooffset_t offset;

	GIANT_REQUIRED;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	new_object = vm_pager_allocate(orig_object->type,
	    NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	source = orig_object->backing_object;
	if (source != NULL) {
		vm_object_reference(source);	/* Referenced by new_object */
		TAILQ_INSERT_TAIL(&source->shadow_head,
		    new_object, shadow_list);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
		new_object->backing_object = source;
		source->shadow_count++;
		source->generation++;
	}

	for (idx = 0; idx < size; idx++) {
		vm_page_t m;

	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
			goto retry;

		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}

	if (orig_object->type == OBJT_SWAP) {
		vm_object_pip_add(orig_object, 1);
		/*
		 * copy orig_object pages into new_object
		 * and destroy unneeded pages in
		 * shadow object.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		vm_object_pip_wakeup(orig_object);
	}

	for (idx = 0; idx < size; idx++) {
		m = vm_page_lookup(new_object, idx);
		if (m) {
			vm_page_wakeup(m);
		}
	}

	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}
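
/*
 * Editorial note on the net effect of vm_map_split(): on success the
 * entry's pages have been renamed into a fresh object, so an entry that
 * mapped [offidxstart, offidxend) of orig_object now maps [0, size) of
 * new_object at offset 0, and orig_object has dropped one reference.
 */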

/*
 *	vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(
	vm_map_t src_map,
	vm_map_t dst_map,
	vm_map_entry_t src_entry,
	vm_map_entry_t dst_entry)
{
	vm_object_t src_object;

	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		if ((src_object = src_entry->object.vm_object) != NULL) {

			if ((src_object->handle == NULL) &&
			    (src_object->type == OBJT_DEFAULT ||
			     src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_map_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}

			vm_object_reference(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			dst_entry->object.vm_object = src_object;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable).
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}
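
/*
 * Editorial sketch of the unwired case above: after vm_map_copy_entry()
 * both entries share src_object and carry
 *
 *	eflags |= (MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
 *
 * with the source pmap write-protected, so the first write fault
 * through either map shadows the object (see vm_map_lookup() below).
 */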

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
	struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	vm_object_t object;

	GIANT_REQUIRED;

	vm_map_lock(old_map);
	old_map->infork = 1;

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_map = &vm2->vm_map;	/* XXX */
	new_map->timestamp = 1;

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if
			 * necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			vm_object_clear_flag(object, OBJ_ONEMAPPING);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map.
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	old_map->infork = 0;
	vm_map_unlock(old_map);

	return (vm2);
}

int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
	     vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t new_stack_entry;
	vm_size_t init_ssize;
	int rv;

	GIANT_REQUIRED;

	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
		return (KERN_NO_SPACE);

	if (max_ssize < SGROWSIZ)
		init_ssize = max_ssize;
	else
		init_ssize = SGROWSIZ;

	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * If we can't accommodate max_ssize in the current mapping,
	 * no go.  However, we need to be aware that subsequent user
	 * mappings might map into the space we have reserved for the
	 * stack, and currently this space is not protected.
	 *
	 * Hopefully we will at least detect this condition
	 * when we try to grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * We initially map a stack of only init_ssize.  We will
	 * grow as needed later.  Since this is to be a grow-down
	 * stack, we map at the top of the range.
	 *
	 * Note: we would normally expect prot and max to be
	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
	 * eliminate these as input parameters, and just
	 * pass these values here in the insert call.
	 */
	rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
	    addrbos + max_ssize, prot, max, cow);

	/* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end != addrbos + max_ssize ||
		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
			panic("Bad entry start/end for new stack entry");
		else
			new_stack_entry->avail_ssize = max_ssize - init_ssize;
	}

	vm_map_unlock(map);
	return (rv);
}
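
/*
 * Illustrative call (editorial addition; the values are hypothetical):
 * reserving a 1MB grow-down stack, of which only
 * init_ssize = min(SGROWSIZ, 1MB) is mapped up front at the top of the
 * range:
 *
 *	rv = vm_map_stack(&vm->vm_map, addrbos, 1024 * 1024,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 */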

/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack(struct proc *p, vm_offset_t addr)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t stack_entry;
	vm_map_entry_t new_stack_entry;
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t end;
	int grow_amount;
	int rv;
	int is_procstack;

	GIANT_REQUIRED;

Retry:
	vm_map_lock_read(map);

	/* If addr is already in the entry range, no need to grow. */
	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}

	if ((stack_entry = prev_entry->next) == &map->header) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}
	if (prev_entry == &map->header)
		end = stack_entry->start - stack_entry->avail_ssize;
	else
		end = prev_entry->end;

	/*
	 * This next test mimics the old grow function in vm_machdep.c.
	 * It really doesn't quite make sense, but we do it anyway
	 * for compatibility.
	 *
	 * If this is not a growable stack, return success.  This signals
	 * the caller to proceed as it normally would with normal vm.
	 */
	if (stack_entry->avail_ssize < 1 ||
	    addr >= stack_entry->start ||
	    addr < stack_entry->start - stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}

	/* Find the minimum grow amount */
	grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
	if (grow_amount > stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * If there is no longer enough space between the entries, it is
	 * no go, and we adjust the available space.  Note: this should
	 * only happen if the user has mapped into the stack area after
	 * the stack was created, and is probably an error.
	 *
	 * This also effectively destroys any guard page the user
	 * might have intended by limiting the stack size.
	 */
	if (grow_amount > stack_entry->start - end) {
		if (vm_map_lock_upgrade(map))
			goto Retry;

		stack_entry->avail_ssize = stack_entry->start - end;

		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

	/*
	 * If this is the main process stack, see if we're over the
	 * stack limit.
	 */
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
	    p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/* Round up the grow amount modulo SGROWSIZ */
	grow_amount = roundup(grow_amount, SGROWSIZ);
	if (grow_amount > stack_entry->avail_ssize) {
		grow_amount = stack_entry->avail_ssize;
	}
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
	    p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
		    ctob(vm->vm_ssize);
	}

	if (vm_map_lock_upgrade(map))
		goto Retry;

	/* Get the preliminary new entry start value */
	addr = stack_entry->start - grow_amount;

	/*
	 * If this puts us into the previous entry, cut back our growth
	 * to the available space.  Also, see the note above.
	 */
	if (addr < end) {
		stack_entry->avail_ssize = stack_entry->start - end;
		addr = end;
	}

	rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
	    VM_PROT_ALL,
	    VM_PROT_ALL,
	    0);

	/* Adjust the available stack space by the amount we grew. */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addr);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end != stack_entry->start ||
		    new_stack_entry->start != addr)
			panic("Bad stack grow start/end in new stack entry");
		else {
			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
			    (new_stack_entry->end -
			     new_stack_entry->start);
			if (is_procstack)
				vm->vm_ssize += btoc(new_stack_entry->end -
				    new_stack_entry->start);
		}
	}

	vm_map_unlock(map);
	return (rv);
}
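
/*
 * Editorial sketch of the expected caller, the page-fault path:
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *
 * where va is the faulting address.  Note that KERN_SUCCESS only means
 * "retry the fault"; as described above, it does not imply the stack
 * actually grew.
 */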

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is empty.
 */
void
vmspace_exec(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_map_t map = &p->p_vmspace->vm_map;

	GIANT_REQUIRED;
	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let the
	 * other processes that are still using the vmspace finally run
	 * it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	vmspace_free(oldvmspace);
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
void
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	GIANT_REQUIRED;
	if (oldvmspace->vm_refcnt == 1)
		return;
	newvmspace = vmspace_fork(oldvmspace);
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	vmspace_free(oldvmspace);
	if (p == curthread->td_proc)		/* XXXKSE ? */
		pmap_activate(curthread);
}
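
/*
 * Illustrative caller of vmspace_unshare() (editorial sketch): per the
 * comment above, rfork(2) with neither RFPROC nor RFMEM requested wants
 * COW semantics within the current process, i.e. schematically
 *
 *	if ((flags & (RFPROC | RFMEM)) == 0)
 *		vmspace_unshare(p);
 */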

/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

	GIANT_REQUIRED;
RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */
	vm_map_lock_read(map);

#define	RETURN(why) \
	{ \
	vm_map_unlock_read(map); \
	return(why); \
	}

	/*
	 * If the map has an interesting hint, try it before calling the
	 * full blown lookup routine.
	 */
	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override.  This is to implement a forced
	 * COW for debuggers.
	 */
	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;

	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
		RETURN(KERN_PROTECTION_FAILURE);
	}

	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));

			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;

		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */
	*out_prot = prot;
	return (KERN_SUCCESS);

#undef	RETURN
}
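
/*
 * Editorial sketch of the lookup protocol: every successful lookup must
 * be balanced by vm_map_lookup_done() on the *returned* map, since a
 * submap traversal may have replaced it:
 *
 *	map = &p->p_vmspace->vm_map;
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired) != KERN_SUCCESS)
 *		return (KERN_INVALID_ADDRESS);
 *	... use object and pindex while the read lock is held ...
 *	vm_map_lookup_done(map, entry);
 */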

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
	/*
	 * Unlock the main-level map
	 */
	GIANT_REQUIRED;
	vm_map_unlock_read(map);
}

/*
 * Implement uiomove with VM operations.  This handles (and its collateral
 * changes support) every combination of source object modification and
 * COW-type operation.
 */
int
vm_uiomove(
	vm_map_t mapa,
	vm_object_t srcobject,
	off_t cp,
	int cnta,
	vm_offset_t uaddra,
	int *npages)
{
	vm_map_t map;
	vm_object_t first_object, oldobject, object;
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired;
	int tcnt, rv;
	vm_offset_t uaddr, start, end, tend;
	vm_pindex_t first_pindex, osize, oindex;
	off_t ooffset;
	int cnt;

	GIANT_REQUIRED;

	if (npages)
		*npages = 0;

	cnt = cnta;
	uaddr = uaddra;

	while (cnt > 0) {
		map = mapa;

		if ((vm_map_lookup(&map, uaddr,
		    VM_PROT_READ, &entry, &first_object,
		    &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
			return EFAULT;
		}

		vm_map_clip_start(map, entry, uaddr);

		tcnt = cnt;
		tend = uaddr + tcnt;
		if (tend > entry->end) {
			tcnt = entry->end - uaddr;
			tend = entry->end;
		}

		vm_map_clip_end(map, entry, tend);

		start = entry->start;
		end = entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		if (npages) {
			vm_pindex_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
				/*
				 * disallow busy or invalid pages, but allow
				 * m->busy pages if they are entirely valid.
				 */
				if ((m->flags & PG_BUSY) ||
				    ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
			}
		}
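
		/*
		 * (Editorial note: three cases follow.  If the entry already
		 * maps the vnode object directly, it is simply re-pointed at
		 * srcobject; if it maps a small private anonymous object,
		 * that object becomes a shadow of srcobject; otherwise the
		 * entry is deleted and re-inserted as a COW mapping of
		 * srcobject.)
		 */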

		/*
		 * If we are changing an existing map entry, just redirect
		 * the object, and change mappings.
		 */
		if ((first_object->type == OBJT_VNODE) &&
		    ((oldobject = entry->object.vm_object) == first_object)) {

			if ((entry->offset != cp) || (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove(map->pmap, uaddr, tend);

				/*
				 * Force copy on write for mmapped regions
				 */
				vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);
					entry->object.vm_object = srcobject;

					if (oldobject) {
						vm_object_deallocate(oldobject);
					}
				}

				entry->offset = cp;
				map->timestamp++;
			} else {
				pmap_remove(map->pmap, uaddr, tend);
			}

		} else if ((first_object->ref_count == 1) &&
		    (first_object->size == osize) &&
		    ((first_object->type == OBJT_DEFAULT) ||
		     (first_object->type == OBJT_SWAP))) {

			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
			    (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove(map->pmap, uaddr, tend);

				/*
				 * Remove unneeded old pages
				 */
				vm_object_page_remove(first_object, 0, 0, 0);

				/*
				 * Invalidate swap space
				 */
				if (first_object->type == OBJT_SWAP) {
					swap_pager_freespace(first_object,
					    0,
					    first_object->size);
				}

				/*
				 * Force copy on write for mmapped regions
				 */
				vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);

					if (oldobject) {
						TAILQ_REMOVE(&oldobject->shadow_head,
						    first_object, shadow_list);
						oldobject->shadow_count--;
						/* XXX bump generation? */
						vm_object_deallocate(oldobject);
					}

					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
					    first_object, shadow_list);
					srcobject->shadow_count++;
					/* XXX bump generation? */

					first_object->backing_object = srcobject;
				}
				first_object->backing_object_offset = cp;
				map->timestamp++;
			} else {
				pmap_remove(map->pmap, uaddr, tend);
			}
		/*
		 * Otherwise, we have to do a logical mmap.
		 */
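		/*
		 * (Editorial note: this fallback removes the old entry
		 * outright and re-inserts [start, tend) as a
		 * MAP_COPY_ON_WRITE mapping of srcobject.)
		 */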
		} else {

			vm_object_set_flag(srcobject, OBJ_OPT);
			vm_object_reference(srcobject);

			pmap_remove(map->pmap, uaddr, tend);

			vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
			vm_map_lock_upgrade(map);

			if (entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = entry->prev;
			}

			SAVE_HINT(map, entry->prev);
			vm_map_entry_delete(map, entry);

			object = srcobject;
			ooffset = cp;

			rv = vm_map_insert(map, object, ooffset, start, tend,
			    VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

		/*
		 * Map the window directly, if it is already in memory
		 */
		pmap_object_init_pt(map->pmap, uaddr,
		    srcobject, oindex, tcnt, 0);

		map->timestamp++;
		vm_map_unlock(map);

		cnt -= tcnt;
		uaddr += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}

/*
 * Performs the copy-on-write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 */
void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	GIANT_REQUIRED;
	if ((object == NULL) ||
	    ((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
			    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						continue;
					}
					vm_page_deactivate(m_in);
				}

				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}
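
/*
 * Illustrative caller of vm_freeze_copyopts() (editorial sketch; the
 * variables are hypothetical): a writer must freeze outstanding
 * copy-opt shadows before modifying the backing object, e.g.
 *
 *	if (object->flags & OBJ_OPT)
 *		vm_freeze_copyopts(object, OFF_TO_IDX(offset),
 *		    OFF_TO_IDX(offset + count));
 */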

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 *	vm_map_print:	[ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	static int nlines;
	/* XXX convert args. */
	vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);
	nlines++;

	if (!full && db_indent)
		return;

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
		    (void *)entry, (void *)entry->start, (void *)entry->end);
		nlines++;
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			/* XXX no %qd in kernel.  Truncate entry->offset. */
			db_printf(", share=%p, offset=0x%lx\n",
			    (void *)entry->object.sub_map,
			    (long)entry->offset);
			nlines++;
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.sub_map !=
			     entry->object.sub_map)) {
				db_indent += 2;
				vm_map_print((db_expr_t)(intptr_t)
				    entry->object.sub_map,
				    full, 0, (char *)0);
				db_indent -= 2;
			}
		} else {
			/* XXX no %qd in kernel.  Truncate entry->offset. */
			db_printf(", object=%p, offset=0x%lx",
			    (void *)entry->object.vm_object,
			    (long)entry->offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;

			if ((entry->prev == &map->header) ||
			    (entry->prev->object.vm_object !=
			     entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    full, 0, (char *)0);
				nlines += 4;
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
	if (db_indent == 0)
		nlines = 0;
}

DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */