/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.c,v 1.104 1998/01/06 05:25:58 dyson Exp $
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * In order to properly represent the sharing of virtual
 * memory regions among maps, the map structure is bi-level.
 * Top-level ("address") maps refer to regions of sharable
 * virtual memory.  These regions are implemented as
 * ("sharing") maps, which then refer to the actual virtual
 * memory objects.  When two address maps "share" memory,
 * their top-level maps both have references to the same
 * sharing map.  When memory is virtual-copied from one
 * address map to another, the references in the sharing
 * maps are actually copied -- no copying occurs at the
 * virtual memory object level.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 * No attempt is currently made to "glue back together" two
 * abutting entries.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one sharing map to
 * another, and then marking both regions as copy-on-write.
 * It is important to note that only one writeable reference
 * to a VM object region exists in any map -- this means that
 * shadow object creation can be delayed until a write operation
 * occurs.
 */

/*
 * vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
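 *
 *	[The zbootinit() calls below seed each zone from the static
 *	arrays declared in this file, so maps and map entries can be
 *	handed out before the zone allocator -- and hence malloc() --
 *	is fully operational; vm_init2() later finishes zone setup
 *	via zinitna().]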
 */

extern char kstack[];
extern int inmprotect;

static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone;
static struct vm_object kmapentobj, mapentobj, mapobj;
#define MAP_ENTRY_INIT	128
struct vm_map_entry map_entry_init[MAX_MAPENT];
struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
struct vm_map map_init[MAX_KMAP];

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));
static vm_page_t vm_freeze_page_alloc __P((vm_object_t, vm_pindex_t));

void
vm_map_startup()
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
	    map_init, MAX_KMAP);
	kmapentzone = &kmapentzone_store;
	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
	    kmap_entry_init, MAX_KMAPENT);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
	    map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vm_init2(void)
{
	zinitna(kmapentzone, &kmapentobj,
	    NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
	zinitna(mapentzone, &mapentobj,
	    NULL, 0, 0, 0, 1);
	zinitna(mapzone, &mapobj,
	    NULL, 0, 0, 0, 1);
	pmap_init2();
	vm_object_init2();
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		while (vm->vm_map.ref_count != 1)
			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
		--vm->vm_map.ref_count;
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	} else {
		wakeup(&vm->vm_map.ref_count);
	}
}

/*
 * vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max, pageable)
	pmap_t pmap;
	vm_offset_t min, max;
	boolean_t pageable;
{
	register vm_map_t result;

	result = zalloc(mapzone);
	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t min, max;
	boolean_t pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
	simple_lock_init(&map->ref_lock);
}

/*
 * vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}

/*
 * vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
}

/*
 * vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
#define vm_map_entry_link(map, after_where, entry) \
	{ \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	}

#define vm_map_entry_unlink(map, entry) \
	{ \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	}

/*
 * vm_map_reference:
 *
 *	Creates another valid reference to the given map.
 */
void
vm_map_reference(map)
	register vm_map_t map;
{
	if (map == NULL)
		return;

	map->ref_count++;
}

/*
 * vm_map_deallocate:
 *
 *	Removes a reference from the specified map,
 *	destroying it if no references remain.
 *	The map should not be locked.
 */
void
vm_map_deallocate(map)
	register vm_map_t map;
{
	register int c;

	if (map == NULL)
		return;

	c = map->ref_count;

	if (c == 0)
		panic("vm_map_deallocate: deallocating already freed map");

	if (c != 1) {
		--map->ref_count;
		wakeup(&map->ref_count);
		return;
	}
	/*
	 * Lock the map, to wait out all other references to it.
	 */

	vm_map_lock_drain_interlock(map);
	(void) vm_map_delete(map, map->min_offset, map->max_offset);
	--map->ref_count;
	if (map->ref_count != 0) {
		vm_map_unlock(map);
		return;
	}

	pmap_destroy(map->pmap);

	vm_map_unlock(map);

	zfree(mapzone, map);
}

/*
 * SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
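 *
 *	[The hint is purely an optimization: a lookup that misses it
 *	falls back to the linear search in vm_map_lookup_entry(), so a
 *	stale hint costs time but never correctness.]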
 */
#define	SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 * vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	register vm_map_t map;
	register vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}

/*
 * vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	register vm_map_entry_t new_entry;
	register vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_object_t prev_object;
	u_char protoeflags;

	if ((object != NULL) && (cow & MAP_NOFAULT)) {
		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
	}

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
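	 * (Together with the lookup above, this confirms that the whole
	 * range [start, end) lies in the gap following prev_entry.)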
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	if (cow & MAP_COPY_NEEDED)
		protoeflags |= MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW;

	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;

	/*
	 * See if we can avoid creating a new entry by extending one of our
	 * neighbors.  Or at least extend the object.
	 */

	if ((object == NULL) &&
	    (prev_entry != &map->header) &&
	    ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
	    ((prev_entry->object.vm_object == NULL) ||
	     (prev_entry->object.vm_object->type == OBJT_DEFAULT)) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0)) {

		if ((protoeflags == prev_entry->eflags) &&
		    ((cow & MAP_NOFAULT) ||
		     vm_object_coalesce(prev_entry->object.vm_object,
			 OFF_TO_IDX(prev_entry->offset),
			 (vm_size_t) (prev_entry->end - prev_entry->start),
			 (vm_size_t) (end - prev_entry->end)))) {

			/*
			 * Coalesced the two objects.  Can we extend the
			 * previous map entry to include the new range?
			 */
			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
			    (prev_entry->protection == prot) &&
			    (prev_entry->max_protection == max)) {

				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				if ((cow & MAP_NOFAULT) == 0) {
					prev_object = prev_entry->object.vm_object;
					default_pager_convert_to_swapq(prev_object);
				}
				return (KERN_SUCCESS);
			} else {
				object = prev_entry->object.vm_object;
				offset = prev_entry->offset + (prev_entry->end -
				    prev_entry->start);

				vm_object_reference(object);
			}
		}
	}

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = prot;
		new_entry->max_protection = max;
		new_entry->wired_count = 0;
	}
	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	default_pager_convert_to_swapq(object);
	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	register vm_map_t map;
	register vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	register vm_map_entry_t entry, next;
	register vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
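	 * (The resulting allocation policy is first-fit from `start'.)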
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
		}
	}
	return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address
 * map with the given length.  The search is defined to be
 * first-fit from the specified address; the region found is
 * returned in the same parameter.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	register vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map || map == mb_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
	    start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}

/*
 * vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.
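 *
 *	Entries merge only when they are virtually contiguous, reference
 *	the same object at consecutive offsets, and agree in all other
 *	attributes (eflags, protection, inheritance, wiring); the checks
 *	below enforce this.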
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ((prev->end == entry->start) &&
		    (prev->object.vm_object == entry->object.vm_object) &&
		    (!prev->object.vm_object ||
			(prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		    (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		    (prev->eflags == entry->eflags) &&
		    (prev->protection == entry->protection) &&
		    (prev->max_protection == entry->max_protection) &&
		    (prev->inheritance == entry->inheritance) &&
		    (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!next->object.vm_object ||
			(next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		    (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}

/*
 * vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
	{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
	}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t start;
{
	register vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL) {
		vm_object_t object;

		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
	{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
	}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t end;
{
	register vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL) {
		vm_object_t object;

		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
	{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
	}

/*
 * vm_map_submap:	[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *	vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
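 *
 *	A typical use looks roughly like the following sketch (cf.
 *	kmem_suballoc() in vm_kern.c; the names here are illustrative):
 *	reserve the range in the parent map, then install the submap:
 *
 *		(void) vm_map_find(map, NULL, 0, &min, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *		submap = vm_map_create(vm_map_pmap(map), min, min + size,
 *		    TRUE);
 *		(void) vm_map_submap(map, min, min + size, submap);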
 */
int
vm_map_submap(map, start, end, submap)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	vm_map_t submap;
{
	vm_map_entry_t entry;
	register int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? \
			~VM_PROT_WRITE : VM_PROT_ALL)

			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
				vm_map_entry_t share_entry;
				vm_offset_t share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
				    current->object.share_map,
				    current->offset,
				    &share_entry);
				share_end = current->offset +
				    (current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
				       (share_entry->start < share_end)) {

					pmap_protect(map->pmap,
					    (qmax(share_entry->start,
						    current->offset) -
						current->offset +
						current->start),
					    min(share_entry->end,
						share_end) -
					    current->offset +
					    current->start,
					    current->protection &
					    MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			} else
				pmap_protect(map->pmap, current->start,
				    current->end,
				    current->protection & MASK(current));
#undef MASK
		}

		vm_map_simplify_entry(map, current);

		current = current->next;
	}

	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.
 */
void
vm_map_madvise(map, pmap, start, end, advise)
	vm_map_t map;
	pmap_t pmap;
	vm_offset_t start, end;
	int advise;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	for (current = entry;
	     (current != &map->header) && (current->start < end);
	     current = current->next) {
		vm_size_t size = current->end - current->start;

		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
			continue;
		}

		/*
		 * Create an object if needed
		 */
		if (current->object.vm_object == NULL) {
			vm_object_t object;
			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
			current->object.vm_object = object;
			current->offset = 0;
		}

		vm_map_clip_end(map, current, end);
		switch (advise) {
		case MADV_NORMAL:
			current->object.vm_object->behavior = OBJ_NORMAL;
			break;
		case MADV_SEQUENTIAL:
			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
			break;
		case MADV_RANDOM:
			current->object.vm_object->behavior = OBJ_RANDOM;
			break;
		/*
		 * Right now, we could handle DONTNEED and WILLNEED with common code.
		 * They are mostly the same, except for the potential async reads (NYI).
		 */
		case MADV_FREE:
		case MADV_DONTNEED:
			{
				vm_pindex_t pindex;
				int count;
				size = current->end - current->start;
				pindex = OFF_TO_IDX(current->offset);
				count = OFF_TO_IDX(size);
				/*
				 * MADV_DONTNEED removes the page from all
				 * pmaps, so pmap_remove is not necessary.
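				 * (vm_object_madvise() below does the
				 * per-page work for both the FREE and
				 * DONTNEED cases.)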
				 */
				vm_object_madvise(current->object.vm_object,
				    pindex, count, advise);
			}
			break;

		case MADV_WILLNEED:
			{
				vm_pindex_t pindex;
				int count;
				size = current->end - current->start;
				pindex = OFF_TO_IDX(current->offset);
				count = OFF_TO_IDX(size);
				vm_object_madvise(current->object.vm_object,
				    pindex, count, advise);
				pmap_object_init_pt(pmap, current->start,
				    current->object.vm_object, pindex,
				    (count << PAGE_SHIFT), 0);
			}
			break;

		default:
			break;
		}
	}

	vm_map_simplify_entry(map, entry);
	vm_map_unlock(map);
	return;
}


/*
 * vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_inherit_t new_inheritance)
{
	register vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_simplify_entry(map, temp_entry);
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * Implement the semantics of mlock
 */
int
vm_map_user_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t estart;
	int rv;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}

	if (new_pageable) {

		entry = start_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
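		 *
		 * (wired_count counts independent wirings; the physical
		 * pages are only unwired once it drops back to zero.)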
		 */
		vm_map_set_recursive(map);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				vm_map_clip_end(map, entry, end);
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
				entry->wired_count--;
				if (entry->wired_count == 0)
					vm_fault_unwire(map, entry->start, entry->end);
			}
			vm_map_simplify_entry(map, entry);
			entry = entry->next;
		}
		vm_map_clear_recursive(map);
	} else {

		entry = start_entry;

		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				entry = entry->next;
				continue;
			}

			if (entry->wired_count != 0) {
				entry->wired_count++;
				entry->eflags |= MAP_ENTRY_USER_WIRED;
				entry = entry->next;
				continue;
			}

			/* Here on entry being newly wired */

			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

					vm_object_shadow(&entry->object.vm_object,
					    &entry->offset,
					    atop(entry->end - entry->start));
					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

				} else if (entry->object.vm_object == NULL) {

					entry->object.vm_object =
					    vm_object_allocate(OBJT_DEFAULT,
						atop(entry->end - entry->start));
					entry->offset = (vm_offset_t) 0;

				}
				default_pager_convert_to_swapq(entry->object.vm_object);
			}

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			entry->eflags |= MAP_ENTRY_USER_WIRED;
			estart = entry->start;

			/* First we need to allow map modifications */
			vm_map_set_recursive(map);
			vm_map_lock_downgrade(map);

			rv = vm_fault_user_wire(map, entry->start, entry->end);
			if (rv) {

				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
				return rv;
			}

			vm_map_clear_recursive(map);
			if (vm_map_lock_upgrade(map)) {
				vm_map_lock(map);
				if (vm_map_lookup_entry(map, estart, &entry)
				    == FALSE) {
					vm_map_unlock(map);
					(void) vm_map_user_pageable(map,
					    start,
					    estart,
					    TRUE);
					return (KERN_INVALID_ADDRESS);
				}
			}
			vm_map_simplify_entry(map, entry);
		}
	}
	vm_map_unlock(map);
	return KERN_SUCCESS;
}

/*
 * vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	register vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	register vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.
	 * Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
			     (entry->next == &map->header ||
			      entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		vm_map_set_recursive(map);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		vm_map_simplify_entry(map, start_entry);
		vm_map_clear_recursive(map);
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1. Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count.  We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2. We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sharing maps, because we won't
				 * hold the lock on the sharing map.
				 */
				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
					if (copyflag &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    atop(entry->end - entry->start));
						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
					} else if (entry->object.vm_object == NULL) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							atop(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
					default_pager_convert_to_swapq(entry->object.vm_object);
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
			     entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			vm_map_set_recursive(map);
			vm_map_lock_downgrade(map);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		} else {
			vm_map_clear_recursive(map);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		vm_map_simplify_entry(map, start_entry);
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}

/*
 * vm_map_clean
 *
 *	Push any dirty cached pages in the address range to their pager.
 *	If syncio is TRUE, dirty pages are written synchronously.
 *	If invalidate is TRUE, any cached pages are freed as well.
 *
 *	Returns an error if any part of the specified range is not mapped.
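 *
 *	[This is the machinery behind msync(2)-style flushing; only
 *	vnode-backed objects are actually written out, since pushing
 *	anonymous memory to its pager would serve no purpose here.]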
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		     current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
			register vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.share_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		/*
		 * Note that there is absolutely no sense in writing out
		 * anonymous objects, so we track down the vnode object
		 * to write out.
		 * We invalidate (remove) all pages from the address space
		 * anyway, for semantic correctness.
		 */
		while (object->backing_object) {
			object = object->backing_object;
			offset += object->backing_object_offset;
			if (object->size < OFF_TO_IDX(offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
		if (invalidate)
			pmap_remove(vm_map_pmap(map), current->start,
			    current->start + size);
		if (object && (object->type == OBJT_VNODE)) {
			/*
			 * Flush pages if writing is allowed. XXX should we continue
			 * on an error?
			 *
			 * XXX Doing async I/O and then removing all the pages from
			 * the object before it completes is probably a very bad
			 * idea.
			 */
			if (current->protection & VM_PROT_WRITE) {
				if (object->type == OBJT_VNODE)
					vn_lock(object->handle, LK_EXCLUSIVE, curproc);
				vm_object_page_clean(object,
				    OFF_TO_IDX(offset),
				    OFF_TO_IDX(offset + size + PAGE_MASK),
				    (syncio || invalidate) ? 1 : 0);
				if (invalidate)
					vm_object_page_remove(object,
					    OFF_TO_IDX(offset),
					    OFF_TO_IDX(offset + size + PAGE_MASK),
					    FALSE);
				if (object->type == OBJT_VNODE)
					VOP_UNLOCK(object->handle, 0, curproc);
			}
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(map, entry)
	vm_map_t map;
	register vm_map_entry_t entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(map, entry)
	register vm_map_t map;
	register vm_map_entry_t entry;
{
	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		vm_map_deallocate(entry->object.share_map);
	} else {
		vm_object_deallocate(entry->object.vm_object);
	}

	vm_map_entry_dispose(map, entry);
}

/*
 * vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t map;
	vm_offset_t start;
	register vm_offset_t end;
{
	register vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each time through the
		 * loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_object_t object;
		vm_ooffset_t offset;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;
		offset = entry->offset;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 * If this is a sharing map, we must remove *all* references
		 * to this data, since we can't find all of the physical maps
		 * which are sharing it.
		 */

		if (object == kernel_object || object == kmem_object) {
			vm_object_page_remove(object, OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + (e - s)), FALSE);
		} else if (!map->is_main_map) {
			vm_object_pmap_remove(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + (e - s)));
		} else {
			pmap_remove(map->pmap, s, e);
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(map, start, end)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register int result, s = 0;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}

/*
 * vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
			vm_prot_t protection)
{
	register vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	if ((dst_entry->eflags | src_entry->eflags) &
	    (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				vm_object_pmap_copy(src_entry->object.vm_object,
				    OFF_TO_IDX(src_entry->offset),
				    OFF_TO_IDX(src_entry->offset + (src_entry->end
					- src_entry->start)));
			}
		}

		/*
		 * Make a copy of the object.
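		 * (Both entries wind up referencing the same object,
		 * marked COW and needs-copy; the shadow object itself is
		 * created lazily, at the first write fault.)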
		 */
		if (src_entry->object.vm_object) {
			if ((src_entry->object.vm_object->handle == NULL) &&
			    (src_entry->object.vm_object->type == OBJT_DEFAULT ||
			    src_entry->object.vm_object->type == OBJT_SWAP))
				vm_object_collapse(src_entry->object.vm_object);
			vm_object_reference(src_entry->object.vm_object);
			src_entry->eflags |= (MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
			dst_entry->object.vm_object =
			    src_entry->object.vm_object;
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired-down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable).
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	pmap_t new_pmap;
	vm_object_t object;

	vm_map_lock(old_map);

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
	    old_map->entries_pageable);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_pmap = &vm2->vm_pmap;	/* XXX */
	new_map = &vm2->vm_map;	/* XXX */

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if
			 * necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				object = old_entry->object.vm_object;
			}

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			vm_object_reference(object);

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
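			 * (This loop visits old_map's entries in ascending
			 * address order, so appending after header.prev, the
			 * current tail, keeps new_map sorted.)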
2111 */ 2112 2113 vm_map_entry_link(new_map, new_map->header.prev, 2114 new_entry); 2115 2116 /* 2117 * Update the physical map 2118 */ 2119 2120 pmap_copy(new_map->pmap, old_map->pmap, 2121 new_entry->start, 2122 (old_entry->end - old_entry->start), 2123 old_entry->start); 2124 break; 2125 2126 case VM_INHERIT_COPY: 2127 /* 2128 * Clone the entry and link into the map. 2129 */ 2130 new_entry = vm_map_entry_create(new_map); 2131 *new_entry = *old_entry; 2132 new_entry->wired_count = 0; 2133 new_entry->object.vm_object = NULL; 2134 new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP; 2135 vm_map_entry_link(new_map, new_map->header.prev, 2136 new_entry); 2137 vm_map_copy_entry(old_map, new_map, old_entry, 2138 new_entry); 2139 break; 2140 } 2141 old_entry = old_entry->next; 2142 } 2143 2144 new_map->size = old_map->size; 2145 vm_map_unlock(old_map); 2146 2147 return (vm2); 2148 } 2149 2150 /* 2151 * Unshare the specified VM space for exec. If other processes are 2152 * mapped to it, then create a new one. The new vmspace is null. 2153 */ 2154 2155 void 2156 vmspace_exec(struct proc *p) { 2157 struct vmspace *oldvmspace = p->p_vmspace; 2158 struct vmspace *newvmspace; 2159 vm_map_t map = &p->p_vmspace->vm_map; 2160 2161 newvmspace = vmspace_alloc(map->min_offset, map->max_offset, 2162 map->entries_pageable); 2163 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 2164 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy); 2165 /* 2166 * This code is written like this for prototype purposes. The 2167 * goal is to avoid running down the vmspace here, but let the 2168 * other process's that are still using the vmspace to finally 2169 * run it down. Even though there is little or no chance of blocking 2170 * here, it is a good idea to keep this form for future mods. 2171 */ 2172 vm_map_reference(&oldvmspace->vm_map); 2173 vmspace_free(oldvmspace); 2174 p->p_vmspace = newvmspace; 2175 if (p == curproc) 2176 pmap_activate(p); 2177 vm_map_deallocate(&oldvmspace->vm_map); 2178 } 2179 2180 /* 2181 * Unshare the specified VM space for forcing COW. This 2182 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 2183 */ 2184 2185 void 2186 vmspace_unshare(struct proc *p) { 2187 struct vmspace *oldvmspace = p->p_vmspace; 2188 struct vmspace *newvmspace; 2189 2190 if (oldvmspace->vm_refcnt == 1) 2191 return; 2192 newvmspace = vmspace_fork(oldvmspace); 2193 vm_map_reference(&oldvmspace->vm_map); 2194 vmspace_free(oldvmspace); 2195 p->p_vmspace = newvmspace; 2196 if (p == curproc) 2197 pmap_activate(p); 2198 vm_map_deallocate(&oldvmspace->vm_map); 2199 } 2200 2201 2202 /* 2203 * vm_map_lookup: 2204 * 2205 * Finds the VM object, offset, and 2206 * protection for a given virtual address in the 2207 * specified map, assuming a page fault of the 2208 * type specified. 2209 * 2210 * Leaves the map in question locked for read; return 2211 * values are guaranteed until a vm_map_lookup_done 2212 * call is performed. Note that the map argument 2213 * is in/out; the returned map must be used in 2214 * the call to vm_map_lookup_done. 2215 * 2216 * A handle (out_entry) is returned for use in 2217 * vm_map_lookup_done, to make that fast. 2218 * 2219 * If a lookup is requested with "write protection" 2220 * specified, the map may be changed to perform virtual 2221 * copying operations, although the data referenced will 2222 * remain the same. 
 */
int
vm_map_lookup(vm_map_t *var_map,	/* IN/OUT */
    vm_offset_t vaddr,
    vm_prot_t fault_type,
    vm_map_entry_t *out_entry,	/* OUT */
    vm_object_t *object,	/* OUT */
    vm_pindex_t *pindex,	/* OUT */
    vm_prot_t *out_prot,	/* OUT */
    boolean_t *wired,		/* OUT */
    boolean_t *single_use)	/* OUT */
{
	vm_map_t share_map;
	vm_offset_t share_offset;
	register vm_map_entry_t entry;
	register vm_map_t map = *var_map;
	register vm_prot_t prot;
	register boolean_t su;

RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);

#define	RETURN(why) \
	{ \
	vm_map_unlock_read(map); \
	return(why); \
	}

	/*
	 * If the map has an interesting hint, try it before calling the
	 * full-blown lookup routine.
	 */

	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps.
	 */

	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.  Note the
	 * special case for MAP_ENTRY_COW pages with an override.  This is
	 * to implement a forced COW for debuggers.
	 */

	prot = entry->protection;
	if ((fault_type & VM_PROT_OVERRIDE_WRITE) == 0 ||
	    (entry->eflags & MAP_ENTRY_COW) == 0 ||
	    (entry->wired_count != 0)) {
		if ((fault_type & (prot)) !=
		    (fault_type & ~VM_PROT_OVERRIDE_WRITE))
			RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If we don't already have a VM object, track it down.
	 */

	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
	if (su) {
		share_map = map;
		share_offset = vaddr;
	} else {
		vm_map_entry_t share_entry;

		/*
		 * Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 * Look for the backing store object and offset.
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
		    &share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 * If the entry was copy-on-write, we either create a shadow
	 * object now (write fault) or demote the permitted access
	 * (read fault).
	 */

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the sharing map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the share map to the new
			 * object.
			 */

			if (vm_map_lock_upgrade(share_map)) {
				if (share_map != map)
					vm_map_unlock_read(map);

				goto RetryLookup;
			}
			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));

			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			vm_map_lock_downgrade(share_map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL) {

		if (vm_map_lock_upgrade(share_map)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
		vm_map_lock_downgrade(share_map);
	}

	if (entry->object.vm_object->type == OBJT_DEFAULT)
		default_pager_convert_to_swapq(entry->object.vm_object);

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */

	if (!su) {
		su = (share_map->ref_count == 1);
	}
	*out_prot = prot;
	*single_use = su;

	return (KERN_SUCCESS);

#undef RETURN
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */

void
vm_map_lookup_done(map, entry)
	register vm_map_t map;
	vm_map_entry_t entry;
{
	/*
	 * If this entry references a map, unlock it first.
	 */

	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 * Unlock the main-level map.
	 */

	vm_map_unlock_read(map);
}

/*
 * Implement uiomove with VM operations.  This code (together with its
 * collateral changes elsewhere) supports every combination of source
 * object modification and COW-type operations.
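 *
 * Sketch of the intended call pattern (hypothetical caller; the names
 * "vp", "uio", "xfersize", and "npages" are illustrative only):
 *
 *	error = vm_uiomove(&curproc->p_vmspace->vm_map, vp->v_object,
 *	    uio->uio_offset, xfersize,
 *	    (vm_offset_t) uio->uio_iov->iov_base, &npages);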
 */
int
vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
	vm_map_t mapa;
	vm_object_t srcobject;
	off_t cp;
	int cnt;
	vm_offset_t uaddra;
	int *npages;
{
	vm_map_t map;
	vm_object_t first_object, object;
	vm_map_entry_t first_entry, entry;
	vm_prot_t prot;
	boolean_t wired, su;
	int tcnt, rv;
	vm_offset_t uaddr, start, end;
	vm_pindex_t first_pindex, osize, oindex;
	off_t ooffset;
	int skipinit, allremoved;

	if (npages)
		*npages = 0;

	allremoved = 0;

	while (cnt > 0) {
		map = mapa;
		uaddr = uaddra;
		skipinit = 0;

		if ((vm_map_lookup(&map, uaddr,
		    VM_PROT_READ, &first_entry, &first_object,
		    &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
			return EFAULT;
		}

		vm_map_clip_start(map, first_entry, uaddr);

		tcnt = cnt;
		if ((uaddr + tcnt) > first_entry->end)
			tcnt = first_entry->end - uaddr;

		vm_map_clip_end(map, first_entry, uaddr + tcnt);

		start = first_entry->start;
		end = first_entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		if (npages) {
			vm_pindex_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, first_entry);
					return 0;
				}
				if ((m->flags & PG_BUSY) ||
				    ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, first_entry);
					return 0;
				}
			}
		}

		/*
		 * If we are changing an existing map entry, just redirect
		 * the object, and change mappings.
		 */
		if (first_object->type == OBJT_VNODE) {

			if (first_object != srcobject) {

				vm_object_deallocate(first_object);
				srcobject->flags |= OBJ_OPT;
				vm_object_reference(srcobject);

				first_entry->object.vm_object = srcobject;
				first_entry->offset = cp;

			} else if (first_entry->offset != cp) {

				first_entry->offset = cp;

			} else {

				skipinit = 1;

			}

			if (skipinit == 0) {
				/*
				 * Remove the old window into the file.
				 */
				if (!allremoved) {
					pmap_remove(map->pmap, uaddra, uaddra + cnt);
					allremoved = 1;
				}

				/*
				 * Force copy-on-write for mmapped regions.
				 */
				vm_object_pmap_copy_1(srcobject,
				    oindex, oindex + osize);
			}

		} else if ((first_object->ref_count == 1) &&
		    (first_object->size == osize) &&
		    (first_object->resident_page_count == 0)) {
			vm_object_t oldobject;

			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
			    (oldobject != srcobject)) {
				/*
				 * Remove the old window into the file.
				 */
				if (!allremoved) {
					pmap_remove(map->pmap, uaddra, uaddra + cnt);
					allremoved = 1;
				}

				/*
				 * Force copy-on-write for mmapped regions.
				 */
				vm_object_pmap_copy_1(srcobject,
				    oindex, oindex + osize);

				/*
				 * Point the object appropriately.
				 */
				if (oldobject != srcobject) {
					/*
					 * Set the object optimization hint flag.
					 */
					srcobject->flags |= OBJ_OPT;
					vm_object_reference(srcobject);

					if (oldobject) {
						TAILQ_REMOVE(&oldobject->shadow_head,
						    first_object, shadow_list);
						oldobject->shadow_count--;
						if (oldobject->shadow_count == 0)
							oldobject->flags &= ~OBJ_OPT;
						vm_object_deallocate(oldobject);
					}

					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
					    first_object, shadow_list);
					srcobject->shadow_count++;

					first_object->backing_object = srcobject;
				}

				first_object->backing_object_offset = cp;
			} else {
				skipinit = 1;
			}
		} else {
			/*
			 * Otherwise, we have to do a logical mmap.
			 */

			srcobject->flags |= OBJ_OPT;
			vm_object_reference(srcobject);

			if (!allremoved) {
				pmap_remove(map->pmap, uaddra, uaddra + cnt);
				allremoved = 1;
			}
			vm_object_pmap_copy_1(srcobject,
			    oindex, oindex + osize);
			vm_map_lookup_done(map, first_entry);

			vm_map_lock(map);

			if (first_entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = first_entry->prev;
			}

			SAVE_HINT(map, first_entry->prev);
			vm_map_entry_delete(map, first_entry);

			rv = vm_map_insert(map, srcobject, cp, start, end,
			    VM_PROT_ALL, VM_PROT_ALL,
			    MAP_COPY_ON_WRITE | MAP_COPY_NEEDED);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

		/*
		 * Map the window directly, if it is already in memory.
		 */
		if (!skipinit)
			pmap_object_init_pt(map->pmap, start,
			    srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 0);

		vm_map_unlock(map);

		cnt -= tcnt;
		uaddra += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}

/*
 * Local routine to allocate a page for an object.
 */
static vm_page_t
vm_freeze_page_alloc(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
		VM_WAIT;
		if ((m = vm_page_lookup(object, pindex)) != NULL)
			return NULL;
	}

	m->valid = VM_PAGE_BITS_ALL;
	m->dirty = 0;
	vm_page_deactivate(m);
	return m;
}

/*
 * Performs the copy-on-write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
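 *
 * A caller would be expected to bracket the affected byte range, for
 * instance (hypothetical caller; "offset" and "count" are illustrative):
 *
 *	vm_freeze_copyopts(object, OFF_TO_IDX(offset),
 *	    OFF_TO_IDX(offset + count));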
 */
void
vm_freeze_copyopts(object, froma, toa)
	vm_object_t object;
	vm_pindex_t froma, toa;
{
	int s;
	vm_object_t robject, robjectn;
	vm_pindex_t idx, from, to;

	if ((vfs_ioopt == 0) || (object == NULL) ||
	    ((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	for (robject = TAILQ_FIRST(&object->shadow_head);
	    robject;
	    robject = robjectn) {
		vm_pindex_t bo_pindex;
		vm_pindex_t dstpindex;
		vm_page_t m_in, m_out;

		robjectn = TAILQ_NEXT(robject, shadow_list);

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
		if (bo_pindex > toa)
			continue;

		if ((bo_pindex + robject->size) < froma)
			continue;

		vm_object_reference(robject);

		s = splvm();
		while (robject->paging_in_progress) {
			robject->flags |= OBJ_PIPWNT;
			tsleep(robject, PVM, "objfrz", 0);
		}
		splx(s);

		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		robject->paging_in_progress++;
		from = froma;
		if (from < bo_pindex)
			from = bo_pindex;

		to = toa;

		for (idx = from; idx < to; idx++) {

			dstpindex = idx - bo_pindex;
			if (dstpindex >= robject->size)
				break;

			m_in = vm_page_lookup(object, idx);
			if (m_in == NULL)
				continue;

			if (m_in->flags & PG_BUSY) {
				s = splvm();
				while (m_in && (m_in->flags & PG_BUSY)) {
					m_in->flags |= PG_WANTED;
					tsleep(m_in, PVM, "pwtfrz", 0);
					m_in = vm_page_lookup(object, idx);
				}
				splx(s);
				if (m_in == NULL)
					continue;
			}
			m_in->flags |= PG_BUSY;

retryout:
			m_out = vm_page_lookup(robject, dstpindex);
			if (m_out && (m_out->flags & PG_BUSY)) {
				s = splvm();
				while (m_out && (m_out->flags & PG_BUSY)) {
					m_out->flags |= PG_WANTED;
					tsleep(m_out, PVM, "pwtfrz", 0);
					m_out = vm_page_lookup(robject, dstpindex);
				}
				splx(s);
			}

			if (m_out == NULL) {
				m_out = vm_freeze_page_alloc(robject, dstpindex);
				if (m_out == NULL)
					goto retryout;
			}

			if (m_out->valid == 0) {
				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in),
				    VM_PAGE_TO_PHYS(m_out));
				m_out->valid = VM_PAGE_BITS_ALL;
			}
			PAGE_WAKEUP(m_out);
			PAGE_WAKEUP(m_in);
		}

		vm_object_pip_wakeup(robject);

		if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {

			object->shadow_count--;

			TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
			robject->backing_object = NULL;
			robject->backing_object_offset = 0;

			if (object->ref_count == 1) {
				if (object->shadow_count == 0)
					object->flags &= ~OBJ_OPT;
				vm_object_deallocate(object);
				vm_object_deallocate(robject);
				return;
			}
			vm_object_deallocate(object);
		}
		vm_object_deallocate(robject);
	}
	if (object->shadow_count == 0)
		object->flags &= ~OBJ_OPT;
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 *	vm_map_print:	[ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	static int nlines;
	/* XXX convert args. */
	register vm_map_t map = (vm_map_t) addr;
	boolean_t full = have_addr;

	register vm_map_entry_t entry;

	db_iprintf("%s map 0x%x: pmap=0x%x, ref=%d, nentries=%d, version=%d\n",
	    (map->is_main_map ? "Task" : "Share"),
	    (int) map, (int) (map->pmap), map->ref_count, map->nentries,
	    map->timestamp);
	nlines++;

	if (!full && db_indent)
		return;

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
#if 0
		if (nlines > 18) {
			db_printf("--More--");
			cngetc();
			db_printf("\r");
			nlines = 0;
		}
#endif

		db_iprintf("map entry 0x%x: start=0x%x, end=0x%x\n",
		    (int) entry, (int) entry->start, (int) entry->end);
		nlines++;
		if (map->is_main_map) {
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) {
			db_printf(", share=0x%x, offset=0x%x\n",
			    (int) entry->object.share_map,
			    (int) entry->offset);
			nlines++;
			if ((entry->prev == &map->header) ||
			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
			    (entry->prev->object.share_map !=
			    entry->object.share_map)) {
				db_indent += 2;
				vm_map_print((int) entry->object.share_map,
				    full, 0, (char *) 0);
				db_indent -= 2;
			}
		} else {
			db_printf(", object=0x%x, offset=0x%x",
			    (int) entry->object.vm_object,
			    (int) entry->offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;

			if ((entry->prev == &map->header) ||
			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
			    (entry->prev->object.vm_object !=
			    entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((int) entry->object.vm_object,
				    full, 0, (char *) 0);
				nlines += 4;
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
	if (db_indent == 0)
		nlines = 0;
}

DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	printf("p = 0x%x, vmspace = 0x%x, map = 0x%x, pmap = 0x%x\n",
	    p, p->p_vmspace, &p->p_vmspace->vm_map, &p->p_vmspace->vm_pmap);

	vm_map_print((int) &p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */
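/*
 * Note: the DB_SHOW_COMMAND entries above are reached from the DDB
 * prompt as "show map <addr>" and "show procvm [<addr>]"; see ddb(4)
 * for the exact invocation syntax.
 */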