/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.c,v 1.73 1997/04/06 02:29:43 dyson Exp $
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * In order to properly represent the sharing of virtual
 * memory regions among maps, the map structure is bi-level.
 * Top-level ("address") maps refer to regions of sharable
 * virtual memory.  These regions are implemented as
 * ("sharing") maps, which then refer to the actual virtual
 * memory objects.  When two address maps "share" memory,
 * their top-level maps both have references to the same
 * sharing map.  When memory is virtual-copied from one
 * address map to another, the references in the sharing
 * maps are actually copied -- no copying occurs at the
 * virtual memory object level.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 * No attempt is currently made to "glue back together" two
 * abutting entries.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one sharing map to
 * another, and then marking both regions as copy-on-write.
 * It is important to note that only one writeable reference
 * to a VM object region exists in any map -- this means that
 * shadow object creation can be delayed until a write operation
 * occurs.
 */

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before
 * any other vm_map routines.
 *
 * Map and entry structures are allocated from the general
 * purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 * These restrictions are necessary since malloc() uses the
 * maps and requires map entries.
 */

vm_offset_t kentry_data;
vm_size_t kentry_data_size;
static vm_map_entry_t kentry_free;
static vm_map_t kmap_free;
extern char kstack[];
extern int inmprotect;

static int kentry_count;
static vm_offset_t mapvm_start, mapvm, mapvmmax;
static int mapvmpgcnt;

static struct vm_map_entry *mappool;
static int mappoolcnt;
#define KENTRY_LOW_WATER 128

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));

void
vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries with
	 * the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	if (mapvmpgcnt == 0 && mapvm == 0) {
		mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
		mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
			mapvmpgcnt * PAGE_SIZE);
		mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
		if (!mapvm)
			mapvmpgcnt = 0;
	}
	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		while( vm->vm_map.ref_count != 1)
			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
		--vm->vm_map.ref_count;
		vm_object_pmap_remove(vm->vm_upages_obj,
			0, vm->vm_upages_obj->size);
		vm_object_deallocate(vm->vm_upages_obj);
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	} else {
		wakeup(&vm->vm_map.ref_count);
	}
}

/*
 * vm_map_create:
 *
 * Creates and returns a new empty VM map with
 * the given physical map structure, and having
 * the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max, pageable)
	pmap_t pmap;
	vm_offset_t min, max;
	boolean_t pageable;
{
	register vm_map_t result;

	if (kmem_map == NULL) {
		result = kmap_free;
		if (result == NULL)
			panic("vm_map_create: out of maps");
		kmap_free = (vm_map_t) result->header.next;
	} else
		MALLOC(result, vm_map_t, sizeof(struct vm_map),
		    M_VMMAP, M_WAITOK);

	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t min, max;
	boolean_t pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
	simple_lock_init(&map->ref_lock);
}

/*
 * vm_map_entry_dispose:	[ internal use only ]
 *
 * Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	int s;

	if (map == kernel_map || map == kmem_map ||
		map == mb_map || map == pager_map) {
		s = splvm();
		entry->next = kentry_free;
		kentry_free = entry;
		++kentry_count;
		splx(s);
	} else {
		entry->next = mappool;
		mappool = entry;
		++mappoolcnt;
	}
}

/*
 * vm_map_entry_create:	[ internal use only ]
 *
 * Allocates a VM map entry for insertion.
 * No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	vm_map_entry_t entry;
	int i;
	int s;

	/*
	 * This is a *very* nasty (and sort of incomplete) hack!!!!
	 */
	if (kentry_count < KENTRY_LOW_WATER) {
		s = splvm();
		if (mapvmpgcnt && mapvm) {
			vm_page_t m;

			m = vm_page_alloc(kernel_object,
			    OFF_TO_IDX(mapvm - VM_MIN_KERNEL_ADDRESS),
			    (map == kmem_map || map == mb_map) ?
			    VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);

			if (m) {
				int newentries;

				newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
				vm_page_wire(m);
				PAGE_WAKEUP(m);
				m->valid = VM_PAGE_BITS_ALL;
				pmap_kenter(mapvm, VM_PAGE_TO_PHYS(m));
				m->flags |= PG_WRITEABLE;

				entry = (vm_map_entry_t) mapvm;
				mapvm += PAGE_SIZE;
				--mapvmpgcnt;

				for (i = 0; i < newentries; i++) {
					vm_map_entry_dispose(kernel_map, entry);
					entry++;
				}
			}
		}
		splx(s);
	}

	if (map == kernel_map || map == kmem_map ||
		map == mb_map || map == pager_map) {
		s = splvm();
		entry = kentry_free;
		if (entry) {
			kentry_free = entry->next;
			--kentry_count;
		} else {
			panic("vm_map_entry_create: out of map entries for kernel");
		}
		splx(s);
	} else {
		entry = mappool;
		if (entry) {
			mappool = entry->next;
			--mappoolcnt;
		} else {
			MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
			    M_VMMAPENT, M_WAITOK);
		}
	}

	return (entry);
}

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps.
 */
#define vm_map_entry_link(map, after_where, entry) \
	{ \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	}
#define vm_map_entry_unlink(map, entry) \
	{ \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	}

/*
 * vm_map_reference:
 *
 * Creates another valid reference to the given map.
 *
 */
void
vm_map_reference(map)
	register vm_map_t map;
{
	if (map == NULL)
		return;

	map->ref_count++;
}

/*
 * vm_map_deallocate:
 *
 * Removes a reference from the specified map,
 * destroying it if no references remain.
 * The map should not be locked.
 */
void
vm_map_deallocate(map)
	register vm_map_t map;
{
	register int c;

	if (map == NULL)
		return;

	c = map->ref_count;

	if (c == 0)
		panic("vm_map_deallocate: deallocating already freed map");

	if (c != 1) {
		--map->ref_count;
		wakeup(&map->ref_count);
		return;
	}
	/*
	 * Lock the map, to wait out all other references to it.
	 */

	vm_map_lock_drain_interlock(map);
	(void) vm_map_delete(map, map->min_offset, map->max_offset);
	--map->ref_count;
	if( map->ref_count != 0) {
		vm_map_unlock(map);
		return;
	}

	pmap_destroy(map->pmap);

	vm_map_unlock(map);

	FREE(map, M_VMMAP);
}

/*
 * SAVE_HINT:
 *
 * Saves the specified entry as the hint for
 * future lookups.
 */
#define SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 * vm_map_lookup_entry:	[ internal use only ]
 *
 * Finds the map entry containing (or
 * immediately preceding) the specified address
 * in the given map; the entry is returned
 * in the "entry" parameter.  The boolean
 * result indicates whether the address is
 * actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	register vm_map_t map;
	register vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
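	 * The hint records the entry found by the most recent successful
	 * lookup, so repeated lookups in the same region normally avoid
	 * the linear search below.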
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}

/*
 * vm_map_insert:
 *
 * Inserts the given whole VM object into the target
 * map at the specified address range.  The object's
 * size should match that of the address range.
 *
 * Requires that the map be locked, and leaves it so.
 */
int
vm_map_insert(map, object, offset, start, end, prot, max, cow)
	vm_map_t map;
	vm_object_t object;
	vm_ooffset_t offset;
	vm_offset_t start;
	vm_offset_t end;
	vm_prot_t prot, max;
	int cow;
{
	register vm_map_entry_t new_entry;
	register vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_object_t prev_object;
	u_char protoeflags;

	if ((object != NULL) && (cow & MAP_NOFAULT)) {
		panic("vm_map_insert: paradoxical MAP_NOFAULT request");
	}

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	if (cow & MAP_COPY_NEEDED)
		protoeflags |= MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW;

	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;

	/*
	 * See if we can avoid creating a new entry by extending one of our
	 * neighbors.  Or at least extend the object.
	 */

	if ((object == NULL) &&
	    (prev_entry != &map->header) &&
	    (( prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0)) {


		if ((protoeflags == prev_entry->eflags) &&
		    ((cow & MAP_NOFAULT) ||
		     vm_object_coalesce(prev_entry->object.vm_object,
			OFF_TO_IDX(prev_entry->offset),
			(vm_size_t) (prev_entry->end - prev_entry->start),
			(vm_size_t) (end - prev_entry->end)))) {

			/*
			 * Coalesced the two objects.  Can we extend the
			 * previous map entry to include the new range?
			 */
			if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
			    (prev_entry->protection == prot) &&
			    (prev_entry->max_protection == max)) {

				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				if ((cow & MAP_NOFAULT) == 0) {
					prev_object = prev_entry->object.vm_object;
					default_pager_convert_to_swapq(prev_object);
				}
				return (KERN_SUCCESS);
			}
			else {
				object = prev_entry->object.vm_object;
				offset = prev_entry->offset + (prev_entry->end -
				    prev_entry->start);

				vm_object_reference(object);
			}
		}
	}

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = prot;
		new_entry->max_protection = max;
		new_entry->wired_count = 0;
	}
	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
		(prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	default_pager_convert_to_swapq(object);
	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	register vm_map_t map;
	register vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	register vm_map_entry_t entry, next;
	register vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	if (map == kernel_map && round_page(start + length) > kernel_vm_end)
		pmap_growkernel(round_page(start + length));
	return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address
 * map with the given length.  The search is defined to be
 * first-fit from the specified address; the region found is
 * returned in the same parameter.
 *
 */
int
vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
	vm_map_t map;
	vm_object_t object;
	vm_ooffset_t offset;
	vm_offset_t *addr;	/* IN/OUT */
	vm_size_t length;
	boolean_t find_space;
	vm_prot_t prot, max;
	int cow;
{
	register vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map || map == mb_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}

/*
 * vm_map_simplify_entry:
 *
 * Simplify the given map entry by merging with either neighbor.
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		    (!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 * vm_map_clip_start:	[ internal use only ]
 *
 * Asserts that the given entry begins at or after
 * the specified address; if necessary,
 * it splits the entry into two.
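 * (The macro form below only calls _vm_map_clip_start when a split is
 * actually required.)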
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t start;
{
	register vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry);

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * vm_map_clip_end:	[ internal use only ]
 *
 * Asserts that the given entry ends at or before
 * the specified address; if necessary,
 * it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t end;
{
	register vm_map_entry_t new_entry;

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 * vm_map_submap:	[ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find,
 * and no other operations may have been performed on this
 * range prior to calling vm_map_submap.
 *
 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
 *	vm_fault
 * [Don't try vm_map_copy!]
 *
 * To remove a submapping, one must first remove the
 * range from the superior map, and then destroy the
 * submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(map, start, end, submap)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	vm_map_t submap;
{
	vm_map_entry_t entry;
	register int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 * vm_map_protect:
 *
 * Sets the protection of the specified address
 * region in the target map.  If "set_max" is
 * specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 */
int
vm_map_protect(map, start, end, new_prot, set_max)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_prot_t new_prot;
	register boolean_t set_max;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define max(a,b)	((a) > (b) ? \
			 (a) : (b))

			if (current->eflags & MAP_ENTRY_IS_A_MAP) {
				vm_map_entry_t share_entry;
				vm_offset_t share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
				    current->object.share_map,
				    current->offset,
				    &share_entry);
				share_end = current->offset +
				    (current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
				    (share_entry->start < share_end)) {

					pmap_protect(map->pmap,
					    (max(share_entry->start,
						    current->offset) -
						current->offset +
						current->start),
					    min(share_entry->end,
						share_end) -
					    current->offset +
					    current->start,
					    current->protection &
					    MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			} else
				pmap_protect(map->pmap, current->start,
				    current->end,
				    current->protection & MASK(entry));
#undef	max
#undef	MASK
		}

		vm_map_simplify_entry(map, current);

		current = current->next;
	}

	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_madvise:
 *
 * This routine traverses a process's map handling the madvise
 * system call.
 */
void
vm_map_madvise(map, pmap, start, end, advise)
	vm_map_t map;
	pmap_t pmap;
	vm_offset_t start, end;
	int advise;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	for(current = entry;
		(current != &map->header) && (current->start < end);
		current = current->next) {
		vm_size_t size = current->end - current->start;

		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
			continue;
		}

		/*
		 * Create an object if needed
		 */
		if (current->object.vm_object == NULL) {
			vm_object_t object;
			object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
			current->object.vm_object = object;
			current->offset = 0;
		}

		vm_map_clip_end(map, current, end);
		switch (advise) {
		case MADV_NORMAL:
			current->object.vm_object->behavior = OBJ_NORMAL;
			break;
		case MADV_SEQUENTIAL:
			current->object.vm_object->behavior = OBJ_SEQUENTIAL;
			break;
		case MADV_RANDOM:
			current->object.vm_object->behavior = OBJ_RANDOM;
			break;
	/*
	 * Right now, we could handle DONTNEED and WILLNEED with common code.
	 * They are mostly the same, except for the potential async reads (NYI).
	 */
		case MADV_FREE:
		case MADV_DONTNEED:
			{
				vm_pindex_t pindex;
				int count;
				size = current->end - current->start;
				pindex = OFF_TO_IDX(entry->offset);
				count = OFF_TO_IDX(size);
				/*
				 * MADV_DONTNEED removes the page from all
				 * pmaps, so pmap_remove is not necessary.
				 */
				vm_object_madvise(current->object.vm_object,
					pindex, count, advise);
			}
			break;

		case MADV_WILLNEED:
			{
				vm_pindex_t pindex;
				int count;
				size = current->end - current->start;
				pindex = OFF_TO_IDX(current->offset);
				count = OFF_TO_IDX(size);
				vm_object_madvise(current->object.vm_object,
					pindex, count, advise);
				pmap_object_init_pt(pmap, current->start,
					current->object.vm_object, pindex,
					(count << PAGE_SHIFT), 0);
			}
			break;

		default:
			break;
		}
	}

	vm_map_simplify_entry(map, entry);
	vm_map_unlock(map);
	return;
}


/*
 * vm_map_inherit:
 *
 * Sets the inheritance of the specified address
 * range in the target map.  Inheritance
 * affects how the map will be shared with
 * child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_inherit_t new_inheritance;
{
	register vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_simplify_entry(map, temp_entry);
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * Implement the semantics of mlock
 */
int
vm_map_user_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	register vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	register vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}

	if (new_pageable) {

		entry = start_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		vm_map_set_recursive(map);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				vm_map_clip_end(map, entry, end);
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
				entry->wired_count--;
				if (entry->wired_count == 0)
					vm_fault_unwire(map, entry->start, entry->end);
			}
			entry = entry->next;
		}
		vm_map_simplify_entry(map, start_entry);
		vm_map_clear_recursive(map);
	} else {

		/*
		 * Because of the possibility of blocking, etc., we restart
		 * through the process's map entries from the beginning so that
		 * we don't end up depending on a map entry that could have
		 * changed.
		 */
	rescan:

		entry = start_entry;

		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				entry = entry->next;
				continue;
			}

			if (entry->wired_count != 0) {
				entry->wired_count++;
				entry->eflags |= MAP_ENTRY_USER_WIRED;
				entry = entry->next;
				continue;
			}

			/* Here on entry being newly wired */

			if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

					vm_object_shadow(&entry->object.vm_object,
					    &entry->offset,
					    OFF_TO_IDX(entry->end
						- entry->start));
					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

				} else if (entry->object.vm_object == NULL) {

					entry->object.vm_object =
					    vm_object_allocate(OBJT_DEFAULT,
						OFF_TO_IDX(entry->end - entry->start));
					entry->offset = (vm_offset_t) 0;

				}
				default_pager_convert_to_swapq(entry->object.vm_object);
			}

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			entry->eflags |= MAP_ENTRY_USER_WIRED;

			/* First we need to allow map modifications */
			vm_map_set_recursive(map);
			if (lockmgr(&map->lock, LK_EXCLUPGRADE,
				(void *)0, curproc)) {
				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
				return (KERN_FAILURE);
			}


			rv = vm_fault_user_wire(map, entry->start, entry->end);
			if (rv) {

				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
				return rv;
			}

			vm_map_clear_recursive(map);
			lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);

			goto rescan;
		}
	}
	vm_map_unlock(map);
	return KERN_SUCCESS;
}

/*
 * vm_map_pageable:
 *
 * Sets the pageability of the specified address
 * range in the target map.  Regions specified
 * as not pageable require locked-down physical
 * memory and physical page maps.
 *
 * The map must not be locked, but a reference
 * must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	register vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	register vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.  Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
				(entry->next == &map->header ||
				    entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		vm_map_set_recursive(map);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		vm_map_simplify_entry(map, start_entry);
		vm_map_clear_recursive(map);
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1. Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count.  We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2. We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sharing maps, because we won't
				 * hold the lock on the sharing map.
				 */
				if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
					if (copyflag &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    OFF_TO_IDX(entry->end
							- entry->start));
						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
					} else if (entry->object.vm_object == NULL) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							OFF_TO_IDX(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
					default_pager_convert_to_swapq(entry->object.vm_object);
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
				entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			vm_map_set_recursive(map);
			lockmgr(&map->lock, LK_DOWNGRADE, (void*)0, curproc);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		} else {
			vm_map_clear_recursive(map);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		vm_map_simplify_entry(map, start_entry);
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}

/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Returns an error if any part of the specified range is not mapped.
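 *
 * Anonymous memory is never written out; the second pass below chases
 * the backing object chain to the underlying vnode object, if any, and
 * only vnode-backed pages are actually cleaned.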
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
			current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
			register vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.share_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		/*
		 * Note that there is absolutely no sense in writing out
		 * anonymous objects, so we track down the vnode object
		 * to write out.
		 * We invalidate (remove) all pages from the address space
		 * anyway, for semantic correctness.
		 */
		while (object->backing_object) {
			object = object->backing_object;
			offset += object->backing_object_offset;
			if (object->size < OFF_TO_IDX( offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
		if (invalidate)
			pmap_remove(vm_map_pmap(map), current->start,
				current->start + size);
		if (object && (object->type == OBJT_VNODE)) {
			/*
			 * Flush pages if writing is allowed. XXX should we continue
			 * on an error?
			 *
			 * XXX Doing async I/O and then removing all the pages from
			 * the object before it completes is probably a very bad
			 * idea.
			 */
			if (current->protection & VM_PROT_WRITE) {
				vm_object_page_clean(object,
					OFF_TO_IDX(offset),
					OFF_TO_IDX(offset + size),
					(syncio||invalidate)?1:0, TRUE);
				if (invalidate)
					vm_object_page_remove(object,
						OFF_TO_IDX(offset),
						OFF_TO_IDX(offset + size),
						FALSE);
			}
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 * Make the region specified by this entry pageable.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(map, entry)
	vm_map_t map;
	register vm_map_entry_t entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 * Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(map, entry)
	register vm_map_t map;
	register vm_map_entry_t entry;
{
	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		vm_map_deallocate(entry->object.share_map);
	} else {
		vm_object_deallocate(entry->object.vm_object);
	}

	vm_map_entry_dispose(map, entry);
}

/*
 * vm_map_delete:	[ internal use only ]
 *
 * Deallocates the given address range from the target
 * map.
 *
 * When called with a sharing map, removes pages from
 * that region from all physical maps.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t map;
	vm_offset_t start;
	register vm_offset_t end;
{
	register vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each time through the
		 * loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_object_t object;
		vm_ooffset_t offset;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;
		offset = entry->offset;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 * If this is a sharing map, we must remove *all* references
		 * to this data, since we can't find all of the physical maps
		 * which are sharing it.
		 */

		if (object == kernel_object || object == kmem_object) {
			vm_object_page_remove(object, OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + (e - s)), FALSE);
		} else if (!map->is_main_map) {
			vm_object_pmap_remove(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + (e - s)));
		} else {
			pmap_remove(map->pmap, s, e);
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
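 *
 * For the kmem_map and mb_map, interrupt-level allocators use these
 * maps, so splvm() blocks interrupts while the map is locked here.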
 */
int
vm_map_remove(map, start, end)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register int result, s = 0;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}

/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified
 * privilege on the entire address region given.
 * The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(map, start, end, protection)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_prot_t protection;
{
	register vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	if ((dst_entry->eflags|src_entry->eflags) &
		(MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				vm_object_pmap_copy(src_entry->object.vm_object,
				    OFF_TO_IDX(src_entry->offset),
				    OFF_TO_IDX(src_entry->offset + (src_entry->end
					- src_entry->start)));
			}
		}

		/*
		 * Make a copy of the object.
/*
 *	vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	if ((dst_entry->eflags|src_entry->eflags) &
	    (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				vm_object_pmap_copy(src_entry->object.vm_object,
				    OFF_TO_IDX(src_entry->offset),
				    OFF_TO_IDX(src_entry->offset + (src_entry->end
					- src_entry->start)));
			}
		}

		/*
		 * Make a copy of the object.
		 */
		if (src_entry->object.vm_object) {
			if ((src_entry->object.vm_object->handle == NULL) &&
			    (src_entry->object.vm_object->type == OBJT_DEFAULT ||
			     src_entry->object.vm_object->type == OBJT_SWAP))
				vm_object_collapse(src_entry->object.vm_object);
			++src_entry->object.vm_object->ref_count;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->object.vm_object =
			    src_entry->object.vm_object;
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	pmap_t new_pmap;
	vm_object_t object;

	vm_map_lock(old_map);

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
	    old_map->entries_pageable);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_pmap = &vm2->vm_pmap;	/* XXX */
	new_map = &vm2->vm_map;		/* XXX */

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    OFF_TO_IDX(old_entry->end -
					old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    OFF_TO_IDX(old_entry->end -
					old_entry->start));

				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				object = old_entry->object.vm_object;
			}

			/*
			 * Clone the entry, referencing the sharing map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			++object->ref_count;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	vm_map_unlock(old_map);

	return (vm2);
}

/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
    object, pindex, out_prot, wired, single_use)
	vm_map_t *var_map;		/* IN/OUT */
	register vm_offset_t vaddr;
	register vm_prot_t fault_type;

	vm_map_entry_t *out_entry;	/* OUT */
	vm_object_t *object;		/* OUT */
	vm_pindex_t *pindex;		/* OUT */
	vm_prot_t *out_prot;		/* OUT */
	boolean_t *wired;		/* OUT */
	boolean_t *single_use;		/* OUT */
{
	vm_map_t share_map;
	vm_offset_t share_offset;
	register vm_map_entry_t entry;
	register vm_map_t map = *var_map;
	register vm_prot_t prot;
	register boolean_t su;

RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);

#define	RETURN(why) \
	{ \
		vm_map_unlock_read(map); \
		return(why); \
	}

	/*
	 * If the map has an interesting hint, try it before calling the
	 * full-blown lookup routine.
	 */

	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps.
	 */

	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override.
	 * This is to implement a forced
	 * COW for debuggers.
	 */

	prot = entry->protection;
	if ((fault_type & VM_PROT_OVERRIDE_WRITE) == 0 ||
	    (entry->eflags & MAP_ENTRY_COW) == 0 ||
	    (entry->wired_count != 0)) {
		if ((fault_type & (prot)) != fault_type)
			RETURN(KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If we don't already have a VM object, track it down.
	 */

	su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
	if (su) {
		share_map = map;
		share_offset = vaddr;
	} else {
		vm_map_entry_t share_entry;

		/*
		 * Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 * Look for the backing store object and offset
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
		    &share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 * If the entry was copy-on-write, either make the copy now (for a
	 * write fault) or demote the allowed protection (for a read fault).
	 */

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the sharing map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain. Note that no new references have appeared
			 * -- one just moved from the share map to the new
			 * object.
			 */

			if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
			    (void *)0, curproc)) {
				if (share_map != map)
					vm_map_unlock_read(map);
				goto RetryLookup;
			}
			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    OFF_TO_IDX(entry->end - entry->start));

			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

			lockmgr(&share_map->lock, LK_DOWNGRADE,
			    (void *)0, curproc);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}
	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL) {

		if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
		    (void *)0, curproc)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    OFF_TO_IDX(entry->end - entry->start));
		entry->offset = 0;
		lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, curproc);
	}

	if (entry->object.vm_object != NULL)
		default_pager_convert_to_swapq(entry->object.vm_object);
	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */

	if (!su) {
		su = (share_map->ref_count == 1);
	}
	*out_prot = prot;
	*single_use = su;

	return (KERN_SUCCESS);

#undef	RETURN
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */

void
vm_map_lookup_done(map, entry)
	register vm_map_t map;
	vm_map_entry_t entry;
{
	/*
	 * If this entry references a map, unlock it first.
	 */

	if (entry->eflags & MAP_ENTRY_IS_A_MAP)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 * Unlock the main-level map
	 */

	vm_map_unlock_read(map);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 *	vm_map_print:	[ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	/* XXX convert args. */
	register vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	register vm_map_entry_t entry;

	db_iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
	    (map->is_main_map ? "Task" : "Share"),
	    (int) map, (int) (map->pmap), map->ref_count, map->nentries,
	    map->timestamp);

	if (!full && db_indent)
		return;

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
		    (int) entry, (int) entry->start, (int) entry->end);
		if (map->is_main_map) {
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_printf("prot=%x/%x/%s, ",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf("wired, ");
		}
		if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
			db_printf("share=0x%x, offset=0x%x\n",
			    (int) entry->object.share_map,
			    (int) entry->offset);
			if ((entry->prev == &map->header) ||
			    ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
			    (entry->prev->object.share_map !=
				entry->object.share_map)) {
				db_indent += 2;
				vm_map_print((int)entry->object.share_map,
				    full, 0, (char *)0);
				db_indent -= 2;
			}
		} else {
			db_printf("object=0x%x, offset=0x%x",
			    (int) entry->object.vm_object,
			    (int) entry->offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");

			if ((entry->prev == &map->header) ||
			    (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
			    (entry->prev->object.vm_object !=
				entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((int)entry->object.vm_object,
				    full, 0, (char *)0);
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
}
#endif /* DDB */
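/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * is to pair vm_map_lookup() with vm_map_lookup_done(), using the returned
 * object and pindex only while the map remains read-locked.  The helper and
 * its name are hypothetical and show the calling sequence only.
 */
static int
example_resolve_address(map, va)
	vm_map_t map;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired, single_use;
	int rv;

	/* Note that vm_map_lookup() may replace "map" when it descends
	 * into a submap; the updated map must be handed back to
	 * vm_map_lookup_done(). */
	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired, &single_use);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... use object and pindex while the map stays read-locked ... */

	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}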