/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.c,v 1.24 1995/08/26 23:18:38 bde Exp $
 */

/*
 * Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>

/*
 * Virtual memory maps provide for the mapping, protection,
 * and sharing of virtual memory objects.  In addition,
 * this module provides for an efficient virtual copy of
 * memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple
 * entries; a single hint is used to speed up lookups.
 *
 * In order to properly represent the sharing of virtual
 * memory regions among maps, the map structure is bi-level.
 * Top-level ("address") maps refer to regions of sharable
 * virtual memory.  These regions are implemented as
 * ("sharing") maps, which then refer to the actual virtual
 * memory objects.  When two address maps "share" memory,
 * their top-level maps both have references to the same
 * sharing map.  When memory is virtual-copied from one
 * address map to another, the references in the sharing
 * maps are actually copied -- no copying occurs at the
 * virtual memory object level.
 *
 * Since portions of maps are specified by start/end addresses,
 * which may not align with existing map entries, all
 * routines merely "clip" entries to these start/end values.
 * [That is, an entry is split into two, bordering at a
 * start or end value.]  Note that these clippings may not
 * always be necessary (as the two resulting entries are then
 * not changed); however, the clipping is done for convenience.
 * No attempt is currently made to "glue back together" two
 * abutting entries.
 *
 * As mentioned above, virtual copy operations are performed
 * by copying VM object references from one sharing map to
 * another, and then marking both regions as copy-on-write.
 * It is important to note that only one writeable reference
 * to a VM object region exists in any map -- this means that
 * shadow object creation can be delayed until a write operation
 * occurs.
 */
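
/*
 * Illustrative sketch (kept disabled, not part of the original module):
 * walking the ordered, circular entry list described above.  The map's
 * header entry acts as a sentinel, so a traversal starts at
 * map->header.next and stops when it comes back around to &map->header.
 * The caller is assumed to hold the map lock; the function name is made
 * up for the example.
 */
#if 0
static int
vm_map_count_entries(map)
	vm_map_t map;
{
	vm_map_entry_t entry;
	int count = 0;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		count++;
	return (count);
}
#endif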

/*
 * vm_map_startup:
 *
 * Initialize the vm_map module.  Must be called before
 * any other vm_map routines.
 *
 * Map and entry structures are allocated from the general
 * purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 * These restrictions are necessary since malloc() uses the
 * maps and requires map entries.
 */

vm_offset_t kentry_data;
vm_size_t kentry_data_size;
vm_map_entry_t kentry_free;
vm_map_t kmap_free;

int kentry_count;
static vm_offset_t mapvm_start, mapvm, mapvmmax;
static int mapvmpgcnt;

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));

void
vm_map_startup()
{
	register int i;
	register vm_map_entry_t mep;
	vm_map_t mp;

	/*
	 * Static map structures for allocation before initialization of
	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
	 */
	kmap_free = mp = (vm_map_t) kentry_data;
	i = MAX_KMAP;
	while (--i > 0) {
		mp->header.next = (vm_map_entry_t) (mp + 1);
		mp++;
	}
	mp++->header.next = NULL;

	/*
	 * Form a free list of statically allocated kernel map entries with
	 * the rest.
	 */
	kentry_free = mep = (vm_map_entry_t) mp;
	kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
	while (--i > 0) {
		mep->next = mep + 1;
		mep++;
	}
	mep->next = NULL;
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max, pageable)
	vm_offset_t min, max;
	int pageable;
{
	register struct vmspace *vm;

	if (mapvmpgcnt == 0 && mapvm == 0) {
		int s;

		mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
		s = splhigh();
		mapvm_start = mapvm = kmem_alloc_pageable(kmem_map, mapvmpgcnt * PAGE_SIZE);
		mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
		splx(s);
		if (!mapvm)
			mapvmpgcnt = 0;
	}
	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
	vm_map_init(&vm->vm_map, min, max, pageable);
	pmap_pinit(&vm->vm_pmap);
	vm->vm_map.pmap = &vm->vm_pmap;	/* XXX */
	vm->vm_refcnt = 1;
	return (vm);
}

void
vmspace_free(vm)
	register struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {
		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);
		while (vm->vm_map.ref_count != 1)
			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
		--vm->vm_map.ref_count;
		pmap_release(&vm->vm_pmap);
		FREE(vm, M_VMMAP);
	}
}

/*
 * vm_map_create:
 *
 * Creates and returns a new empty VM map with
 * the given physical map structure, and having
 * the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max, pageable)
	pmap_t pmap;
	vm_offset_t min, max;
	boolean_t pageable;
{
	register vm_map_t result;

	if (kmem_map == NULL) {
		result = kmap_free;
		kmap_free = (vm_map_t) result->header.next;
		if (result == NULL)
			panic("vm_map_create: out of maps");
	} else
		MALLOC(result, vm_map_t, sizeof(struct vm_map),
		    M_VMMAP, M_WAITOK);

	vm_map_init(result, min, max, pageable);
	result->pmap = pmap;
	return (result);
}
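
/*
 * Illustrative sketch (disabled): the vmspace life cycle as a caller such
 * as the fork/exit paths might drive it.  vmspace_alloc() returns a
 * structure with vm_refcnt set to 1; an additional user takes a reference
 * by bumping vm_refcnt, and every user eventually calls vmspace_free(),
 * which tears the map down when the last reference goes away.  The
 * function name is made up, and the address bounds are assumed to come
 * from <machine/vmparam.h>.
 */
#if 0
static void
vmspace_lifecycle_example()
{
	struct vmspace *vm;

	vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 1);
	vm->vm_refcnt++;	/* a second user of this vmspace */
	vmspace_free(vm);	/* drops the extra reference */
	vmspace_free(vm);	/* last reference: mappings are destroyed */
}
#endif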

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max, pageable)
	register struct vm_map *map;
	vm_offset_t min, max;
	boolean_t pageable;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->ref_count = 1;
	map->is_main_map = TRUE;
	map->min_offset = min;
	map->max_offset = max;
	map->entries_pageable = pageable;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lock_init(&map->lock, TRUE);
}

/*
 * vm_map_entry_create:	[ internal use only ]
 *
 * Allocates a VM map entry for insertion.
 * No entry fields are filled in.
 */
static struct vm_map_entry *mappool;
static int mappoolcnt;

vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	vm_map_entry_t entry;
	int i;

#define KENTRY_LOW_WATER 64
#define MAPENTRY_LOW_WATER 128

	/*
	 * This is a *very* nasty (and sort of incomplete) hack!!!!
	 */
	if (kentry_count < KENTRY_LOW_WATER) {
		if (mapvmpgcnt && mapvm) {
			vm_page_t m;

			m = vm_page_alloc(kmem_object,
			    mapvm - vm_map_min(kmem_map),
			    (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
			if (m) {
				int newentries;

				newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
				vm_page_wire(m);
				m->flags &= ~PG_BUSY;
				m->valid = VM_PAGE_BITS_ALL;
				pmap_enter(vm_map_pmap(kmem_map), mapvm,
				    VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
				m->flags |= PG_WRITEABLE|PG_MAPPED;

				entry = (vm_map_entry_t) mapvm;
				mapvm += PAGE_SIZE;
				--mapvmpgcnt;

				for (i = 0; i < newentries; i++) {
					vm_map_entry_dispose(kernel_map, entry);
					entry++;
				}
			}
		}
	}
	if (map == kernel_map || map == kmem_map || map == pager_map) {

		entry = kentry_free;
		if (entry) {
			kentry_free = entry->next;
			--kentry_count;
			return entry;
		}
		entry = mappool;
		if (entry) {
			mappool = entry->next;
			--mappoolcnt;
			return entry;
		}
	} else {
		entry = mappool;
		if (entry) {
			mappool = entry->next;
			--mappoolcnt;
			return entry;
		}
		MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
		    M_VMMAPENT, M_WAITOK);
	}
	if (entry == NULL)
		panic("vm_map_entry_create: out of map entries");

	return (entry);
}

/*
 * vm_map_entry_dispose:	[ internal use only ]
 *
 * Inverse of vm_map_entry_create.
 */
void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	if ((kentry_count < KENTRY_LOW_WATER) ||
	    ((vm_offset_t) entry >= kentry_data && (vm_offset_t) entry < (kentry_data + kentry_data_size)) ||
	    ((vm_offset_t) entry >= mapvm_start && (vm_offset_t) entry < mapvmmax)) {
		entry->next = kentry_free;
		kentry_free = entry;
		++kentry_count;
		return;
	} else {
		if (mappoolcnt < MAPENTRY_LOW_WATER) {
			entry->next = mappool;
			mappool = entry;
			++mappoolcnt;
			return;
		}
		FREE(entry, M_VMMAPENT);
	}
}

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps.
 */
#define	vm_map_entry_link(map, after_where, entry) \
	{ \
		(map)->nentries++; \
		(entry)->prev = (after_where); \
		(entry)->next = (after_where)->next; \
		(entry)->prev->next = (entry); \
		(entry)->next->prev = (entry); \
	}
#define	vm_map_entry_unlink(map, entry) \
	{ \
		(map)->nentries--; \
		(entry)->next->prev = (entry)->prev; \
		(entry)->prev->next = (entry)->next; \
	}

/*
 * vm_map_reference:
 *
 * Creates another valid reference to the given map.
 */
void
vm_map_reference(map)
	register vm_map_t map;
{
	if (map == NULL)
		return;

	map->ref_count++;
}

/*
 * vm_map_deallocate:
 *
 * Removes a reference from the specified map,
 * destroying it if no references remain.
 * The map should not be locked.
 */
void
vm_map_deallocate(map)
	register vm_map_t map;
{
	register int c;

	if (map == NULL)
		return;

	c = map->ref_count;

	if (c == 0)
		panic("vm_map_deallocate: deallocating already freed map");

	if (c != 1) {
		--map->ref_count;
		wakeup(&map->ref_count);
		return;
	}
	/*
	 * Lock the map, to wait out all other references to it.
	 */

	vm_map_lock(map);
	(void) vm_map_delete(map, map->min_offset, map->max_offset);
	--map->ref_count;
	if (map->ref_count != 0) {
		vm_map_unlock(map);
		return;
	}

	pmap_destroy(map->pmap);
	FREE(map, M_VMMAP);
}

/*
 * vm_map_insert:
 *
 * Inserts the given whole VM object into the target
 * map at the specified address range.  The object's
 * size should match that of the address range.
 *
 * Requires that the map be locked, and leaves it so.
 */
int
vm_map_insert(map, object, offset, start, end)
	vm_map_t map;
	vm_object_t object;
	vm_offset_t offset;
	vm_offset_t start;
	vm_offset_t end;
{
	register vm_map_entry_t new_entry;
	register vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	/*
	 * See if we can avoid creating a new entry by extending one of our
	 * neighbors.
	 */

	if (object == NULL) {
		if ((prev_entry != &map->header) &&
		    (prev_entry->end == start) &&
		    (map->is_main_map) &&
		    (prev_entry->is_a_map == FALSE) &&
		    (prev_entry->is_sub_map == FALSE) &&
		    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == VM_PROT_DEFAULT) &&
		    (prev_entry->max_protection == VM_PROT_DEFAULT) &&
		    (prev_entry->wired_count == 0)) {

			if (vm_object_coalesce(prev_entry->object.vm_object,
			    NULL,
			    prev_entry->offset,
			    (vm_offset_t) 0,
			    (vm_size_t) (prev_entry->end - prev_entry->start),
			    (vm_size_t) (end - prev_entry->end))) {
				/*
				 * Coalesced the two objects - can extend the
				 * previous map entry to include the new
				 * range.
				 */
				map->size += (end - prev_entry->end);
				prev_entry->end = end;
				return (KERN_SUCCESS);
			}
		}
	}
	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->is_a_map = FALSE;
	new_entry->is_sub_map = FALSE;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->copy_on_write = FALSE;
	new_entry->needs_copy = FALSE;

	if (map->is_main_map) {
		new_entry->inheritance = VM_INHERIT_DEFAULT;
		new_entry->protection = VM_PROT_DEFAULT;
		new_entry->max_protection = VM_PROT_DEFAULT;
		new_entry->wired_count = 0;
	}
	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */

	if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
		map->first_free = new_entry;

	return (KERN_SUCCESS);
}

/*
 * SAVE_HINT:
 *
 * Saves the specified entry as the hint for
 * future lookups.
 */
#define	SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 * vm_map_lookup_entry:	[ internal use only ]
 *
 * Finds the map entry containing (or
 * immediately preceding) the specified address
 * in the given map; the entry is returned
 * in the "entry" parameter.  The boolean
 * result indicates whether the address is
 * actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	register vm_map_t map;
	register vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	register vm_map_t map;
	register vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	register vm_map_entry_t entry, next;
	register vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	if (map == kernel_map && round_page(start + length) > kernel_vm_end)
		pmap_growkernel(round_page(start + length));
	return (0);
}
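
/*
 * Illustrative sketch (disabled): the usual calling pattern for
 * vm_map_findspace() -- essentially what vm_map_find() below does, minus
 * the kmem_map spl handling.  The map must stay locked across the search
 * and the insertion, or the space found could be claimed by someone else
 * in between.  The function name, object, and size are placeholders.
 */
#if 0
static int
vm_map_findspace_example(map, object, size, addrp)
	vm_map_t map;
	vm_object_t object;
	vm_size_t size;
	vm_offset_t *addrp;
{
	int rv;

	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, addrp)) {
		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}
	rv = vm_map_insert(map, object, (vm_offset_t) 0, *addrp,
	    *addrp + size);
	vm_map_unlock(map);
	return (rv);
}
#endif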

/*
 * vm_map_find finds an unallocated region in the target address
 * map with the given length.  The search is defined to be
 * first-fit from the specified address; the region found is
 * returned in the same parameter.
 */
int
vm_map_find(map, object, offset, addr, length, find_space)
	vm_map_t map;
	vm_object_t object;
	vm_offset_t offset;
	vm_offset_t *addr;	/* IN/OUT */
	vm_size_t length;
	boolean_t find_space;
{
	register vm_offset_t start;
	int result, s = 0;

	start = *addr;
	vm_map_lock(map);

	if (map == kmem_map)
		s = splhigh();

	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset, start, start + length);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 * vm_map_simplify_entry:	[ internal use only ]
 *
 * Simplify the given map entry by:
 *	removing extra sharing maps
 *	[XXX maybe later] merging with a neighbor
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
#ifdef lint
	map++;
#endif

	/*
	 * If this entry corresponds to a sharing map, then see if we can
	 * remove the level of indirection. If it's not a sharing map, then it
	 * points to a VM object, so see if we can merge with either of our
	 * neighbors.
	 */

	if (entry->is_sub_map)
		return;
	if (entry->is_a_map) {
#if 0
		vm_map_t my_share_map;
		int count;

		my_share_map = entry->object.share_map;
		count = my_share_map->ref_count;

		if (count == 1) {
			/*
			 * Can move the region from entry->start to entry->end
			 * (+ entry->offset) in my_share_map into place of
			 * entry. Later.
			 */
		}
#endif
	} else {
		/*
		 * Try to merge with our neighbors.
		 *
		 * Conditions for merge are:
		 *
		 *	1. entries are adjacent.
		 *	2. both entries point to objects with null pagers.
		 *
		 * If a merge is possible, we replace the two entries with a
		 * single entry, then merge the two objects into a single
		 * object.
		 *
		 * Now, all that is left to do is write the code!
		 */
	}
}

/*
 * vm_map_clip_start:	[ internal use only ]
 *
 * Asserts that the given entry begins at or after
 * the specified address; if necessary,
 * it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t start;
{
	register vm_map_entry_t new_entry;

	/*
	 * See if we can simplify this entry first
	 */

	/* vm_map_simplify_entry(map, entry); */

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * vm_map_clip_end:	[ internal use only ]
 *
 * Asserts that the given entry ends at or before
 * the specified address; if necessary,
 * it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 * This routine is called only when it is known that
 * the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
	register vm_map_t map;
	register vm_map_entry_t entry;
	register vm_offset_t end;
{
	register vm_map_entry_t new_entry;

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 * VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 * Asserts that the starting and ending region
 * addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 * vm_map_submap:	[ kernel use only ]
 *
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find,
 * and no other operations may have been performed on this
 * range prior to calling vm_map_submap.
 *
 * Only a limited number of operations can be performed
 * within this range after calling vm_map_submap:
 *	vm_fault
 * [Don't try vm_map_copy!]
 *
 * To remove a submapping, one must first remove the
 * range from the superior map, and then destroy the
 * submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(map, start, end, submap)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	vm_map_t submap;
{
	vm_map_entry_t entry;
	register int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    (!entry->is_a_map) &&
	    (entry->object.vm_object == NULL) &&
	    (!entry->copy_on_write)) {
		entry->is_a_map = FALSE;
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}
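
/*
 * Illustrative sketch (disabled): installing a submap.  The parent range
 * is reserved with vm_map_find() first, then a new map covering the same
 * range is created and installed with vm_map_submap().  This is roughly
 * the pattern used to carve submaps out of kernel_map (cf.
 * kmem_suballoc()); the function name and error handling here are
 * simplified for the example.
 */
#if 0
static vm_map_t
vm_map_submap_example(parent, min, max)
	vm_map_t parent;
	vm_offset_t *min, *max;
{
	vm_map_t result;
	vm_size_t size;

	size = *max - *min;
	if (vm_map_find(parent, NULL, (vm_offset_t) 0, min, size, TRUE)
	    != KERN_SUCCESS)
		return (NULL);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max, TRUE);
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("vm_map_submap_example");
	return (result);
}
#endif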
If "set_max" is 1020 * specified, the maximum protection is to be set; 1021 * otherwise, only the current protection is affected. 1022 */ 1023 int 1024 vm_map_protect(map, start, end, new_prot, set_max) 1025 register vm_map_t map; 1026 register vm_offset_t start; 1027 register vm_offset_t end; 1028 register vm_prot_t new_prot; 1029 register boolean_t set_max; 1030 { 1031 register vm_map_entry_t current; 1032 vm_map_entry_t entry; 1033 1034 vm_map_lock(map); 1035 1036 VM_MAP_RANGE_CHECK(map, start, end); 1037 1038 if (vm_map_lookup_entry(map, start, &entry)) { 1039 vm_map_clip_start(map, entry, start); 1040 } else 1041 entry = entry->next; 1042 1043 /* 1044 * Make a first pass to check for protection violations. 1045 */ 1046 1047 current = entry; 1048 while ((current != &map->header) && (current->start < end)) { 1049 if (current->is_sub_map) { 1050 vm_map_unlock(map); 1051 return (KERN_INVALID_ARGUMENT); 1052 } 1053 if ((new_prot & current->max_protection) != new_prot) { 1054 vm_map_unlock(map); 1055 return (KERN_PROTECTION_FAILURE); 1056 } 1057 current = current->next; 1058 } 1059 1060 /* 1061 * Go back and fix up protections. [Note that clipping is not 1062 * necessary the second time.] 1063 */ 1064 1065 current = entry; 1066 1067 while ((current != &map->header) && (current->start < end)) { 1068 vm_prot_t old_prot; 1069 1070 vm_map_clip_end(map, current, end); 1071 1072 old_prot = current->protection; 1073 if (set_max) 1074 current->protection = 1075 (current->max_protection = new_prot) & 1076 old_prot; 1077 else 1078 current->protection = new_prot; 1079 1080 /* 1081 * Update physical map if necessary. Worry about copy-on-write 1082 * here -- CHECK THIS XXX 1083 */ 1084 1085 if (current->protection != old_prot) { 1086 1087 #define MASK(entry) ((entry)->copy_on_write ? ~VM_PROT_WRITE : \ 1088 VM_PROT_ALL) 1089 #define max(a,b) ((a) > (b) ? (a) : (b)) 1090 1091 if (current->is_a_map) { 1092 vm_map_entry_t share_entry; 1093 vm_offset_t share_end; 1094 1095 vm_map_lock(current->object.share_map); 1096 (void) vm_map_lookup_entry( 1097 current->object.share_map, 1098 current->offset, 1099 &share_entry); 1100 share_end = current->offset + 1101 (current->end - current->start); 1102 while ((share_entry != 1103 ¤t->object.share_map->header) && 1104 (share_entry->start < share_end)) { 1105 1106 pmap_protect(map->pmap, 1107 (max(share_entry->start, 1108 current->offset) - 1109 current->offset + 1110 current->start), 1111 min(share_entry->end, 1112 share_end) - 1113 current->offset + 1114 current->start, 1115 current->protection & 1116 MASK(share_entry)); 1117 1118 share_entry = share_entry->next; 1119 } 1120 vm_map_unlock(current->object.share_map); 1121 } else 1122 pmap_protect(map->pmap, current->start, 1123 current->end, 1124 current->protection & MASK(entry)); 1125 #undef max 1126 #undef MASK 1127 } 1128 current = current->next; 1129 } 1130 1131 vm_map_unlock(map); 1132 return (KERN_SUCCESS); 1133 } 1134 1135 /* 1136 * vm_map_inherit: 1137 * 1138 * Sets the inheritance of the specified address 1139 * range in the target map. Inheritance 1140 * affects how the map will be shared with 1141 * child maps at the time of vm_map_fork. 

/*
 * vm_map_inherit:
 *
 * Sets the inheritance of the specified address
 * range in the target map.  Inheritance
 * affects how the map will be shared with
 * child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_inherit_t new_inheritance;
{
	register vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_pageable:
 *
 * Sets the pageability of the specified address
 * range in the target map.  Regions specified
 * as not pageable require locked-down physical
 * memory and physical page maps.
 *
 * The map must not be locked, but a reference
 * must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register boolean_t new_pageable;
{
	register vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	register vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring. Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region. We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
			    (entry->next == &map->header ||
			    entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		lock_set_recursive(&map->lock);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			entry = entry->next;
		}
		lock_clear_recursive(&map->lock);
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1. Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count. We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2. We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sharing maps, because we won't
				 * hold the lock on the sharing map.
				 */
				if (!entry->is_a_map && !entry->is_sub_map) {
					if (entry->needs_copy &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    (vm_size_t) (entry->end
							- entry->start));
						entry->needs_copy = FALSE;
					} else if (entry->object.vm_object == NULL) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							(vm_size_t) (entry->end
							- entry->start));
						entry->offset = (vm_offset_t) 0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
			    entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			lock_set_recursive(&map->lock);
			lock_write_to_read(&map->lock);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		} else {
			lock_clear_recursive(&map->lock);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}
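
/*
 * Illustrative sketch (disabled): wiring down the pages backing a buffer
 * around an operation that needs them resident, then unwiring them
 * afterwards.  Passing FALSE for new_pageable wires the range; TRUE makes
 * it pageable again.  The wrapper name is made up, and trunc_page()/
 * round_page() are assumed to come from the usual VM headers.
 */
#if 0
static int
vm_map_wire_example(map, addr, len)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t len;
{
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + len);
	int rv;

	rv = vm_map_pageable(map, start, end, FALSE);	/* wire */
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... operate on the now-resident pages ... */
	return (vm_map_pageable(map, start, end, TRUE));	/* unwire */
}
#endif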

/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_offset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->is_sub_map) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		    current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->is_a_map || current->is_sub_map) {
			register vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.share_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		if (object && (object->type == OBJT_VNODE)) {
			/*
			 * Flush pages if writing is allowed. XXX should we continue
			 * on an error?
			 *
			 * XXX Doing async I/O and then removing all the pages from
			 * the object before it completes is probably a very bad
			 * idea.
			 */
			if (current->protection & VM_PROT_WRITE)
				vm_object_page_clean(object, offset, offset + size, syncio, TRUE);
			if (invalidate)
				vm_object_page_remove(object, offset, offset + size, FALSE);
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 * Make the region specified by this entry pageable.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
void
vm_map_entry_unwire(map, entry)
	vm_map_t map;
	register vm_map_entry_t entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 * Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(map, entry)
	register vm_map_t map;
	register vm_map_entry_t entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}

/*
 * vm_map_delete:	[ internal use only ]
 *
 * Deallocates the given address range from the target
 * map.
 *
 * When called with a sharing map, removes pages from
 * that region from all physical maps.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t map;
	vm_offset_t start;
	register vm_offset_t end;
{
	register vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each time through the
		 * loop.
		 */

		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		register vm_offset_t s, e;
		register vm_object_t object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 * If this is a sharing map, we must remove *all* references
		 * to this data, since we can't find all of the physical maps
		 * which are sharing it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, entry->offset,
			    entry->offset + (e - s), FALSE);
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
			    entry->offset,
			    entry->offset + (e - s));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */

		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
 */
int
vm_map_remove(map, start, end)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register int result, s = 0;

	if (map == kmem_map)
		s = splhigh();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map)
		splx(s);

	return (result);
}

/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified
 * privilege on the entire address region given.
 * The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(map, start, end, protection)
	register vm_map_t map;
	register vm_offset_t start;
	register vm_offset_t end;
	register vm_prot_t protection;
{
	register vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}
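
/*
 * Illustrative sketch (disabled): asking whether an entire range is
 * readable before operating on it.  vm_map_check_protection() only
 * answers the question at the time of the call; it does not wire or lock
 * anything down.  The wrapper name is made up for the example.
 */
#if 0
static boolean_t
vm_map_range_is_readable(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	return (vm_map_check_protection(map, start, end, VM_PROT_READ));
}
#endif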

/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
 */
void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t src_map, dst_map;
	register vm_map_entry_t src_entry, dst_entry;
{
	vm_object_t temp_object;

	if (src_entry->is_sub_map || dst_entry->is_sub_map)
		return;

	if (dst_entry->object.vm_object != NULL)
		printf("vm_map_copy_entry: dst_entry object not NULL!\n");

	/*
	 * If our destination map was wired down, unwire it now.
	 */

	if (dst_entry->wired_count != 0)
		vm_map_entry_unwire(dst_map, dst_entry);

	/*
	 * If we're dealing with a sharing map, we must remove the destination
	 * pages from all maps (since we cannot know which maps this sharing
	 * map belongs in).
	 */

	if (dst_map->is_main_map)
		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
	else
		vm_object_pmap_remove(dst_entry->object.vm_object,
		    dst_entry->offset,
		    dst_entry->offset +
		    (dst_entry->end - dst_entry->start));

	if (src_entry->wired_count == 0) {

		boolean_t src_needs_copy;

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if (!src_entry->needs_copy) {

			boolean_t su;

			/*
			 * If the source entry has only one mapping, we can
			 * just protect the virtual address range.
			 */
			if (!(su = src_map->is_main_map)) {
				su = (src_map->ref_count == 1);
			}
			if (su) {
				pmap_protect(src_map->pmap,
				    src_entry->start,
				    src_entry->end,
				    src_entry->protection & ~VM_PROT_WRITE);
			} else {
				vm_object_pmap_copy(src_entry->object.vm_object,
				    src_entry->offset,
				    src_entry->offset + (src_entry->end
					- src_entry->start));
			}
		}
		/*
		 * Make a copy of the object.
		 */
		vm_object_copy(src_entry->object.vm_object,
		    src_entry->offset,
		    (vm_size_t) (src_entry->end -
			src_entry->start),
		    &dst_entry->object.vm_object,
		    &dst_entry->offset,
		    &src_needs_copy);
		/*
		 * If we didn't get a copy-object now, mark the source map
		 * entry so that a shadow will be created to hold its changed
		 * pages.
		 */
		if (src_needs_copy)
			src_entry->needs_copy = TRUE;

		/*
		 * The destination always needs to have a shadow created.
		 */
		dst_entry->needs_copy = TRUE;

		/*
		 * Mark the entries copy-on-write, so that write-enabling the
		 * entry won't make copy-on-write pages writable.
		 */
		src_entry->copy_on_write = TRUE;
		dst_entry->copy_on_write = TRUE;

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	pmap_t new_pmap;

	vm_map_lock(old_map);

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
	    old_map->entries_pageable);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_pmap = &vm2->vm_pmap;	/* XXX */
	new_map = &vm2->vm_map;	/* XXX */

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->is_sub_map)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, referencing the sharing map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			++new_entry->object.vm_object->ref_count;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */

			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->is_a_map = FALSE;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	vm_map_unlock(old_map);

	return (vm2);
}

/*
 * vm_map_lookup:
 *
 * Finds the VM object, offset, and
 * protection for a given virtual address in the
 * specified map, assuming a page fault of the
 * type specified.
 *
 * Leaves the map in question locked for read; return
 * values are guaranteed until a vm_map_lookup_done
 * call is performed.  Note that the map argument
 * is in/out; the returned map must be used in
 * the call to vm_map_lookup_done.
 *
 * A handle (out_entry) is returned for use in
 * vm_map_lookup_done, to make that fast.
 *
 * If a lookup is requested with "write protection"
 * specified, the map may be changed to perform virtual
 * copying operations, although the data referenced will
 * remain the same.
 */
int
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
    object, offset, out_prot, wired, single_use)
	vm_map_t *var_map;	/* IN/OUT */
	register vm_offset_t vaddr;
	register vm_prot_t fault_type;

	vm_map_entry_t *out_entry;	/* OUT */
	vm_object_t *object;	/* OUT */
	vm_offset_t *offset;	/* OUT */
	vm_prot_t *out_prot;	/* OUT */
	boolean_t *wired;	/* OUT */
	boolean_t *single_use;	/* OUT */
{
	vm_map_t share_map;
	vm_offset_t share_offset;
	register vm_map_entry_t entry;
	register vm_map_t map = *var_map;
	register vm_prot_t prot;
	register boolean_t su;

RetryLookup:;

	/*
	 * Lookup the faulting address.
	 */

	vm_map_lock_read(map);

#define	RETURN(why) \
	{ \
		vm_map_unlock_read(map); \
		return(why); \
	}

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */

	entry = map->hint;

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}
	/*
	 * Handle submaps.
	 */

	if (entry->is_sub_map) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}
	/*
	 * Check whether this task is allowed to have this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type)
		RETURN(KERN_PROTECTION_FAILURE);

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If we don't already have a VM object, track it down.
	 */

	su = !entry->is_a_map;
	if (su) {
		share_map = map;
		share_offset = vaddr;
	} else {
		vm_map_entry_t share_entry;

		/*
		 * Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 * Look for the backing store object and offset
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
		    &share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 * If the entry was copy-on-write, we either ...
	 */

	if (entry->needs_copy) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the sharing map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the share map to the new
			 * object.
			 */

			if (lock_read_to_write(&share_map->lock)) {
				if (share_map != map)
					vm_map_unlock_read(map);
				goto RetryLookup;
			}
			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    (vm_size_t) (entry->end - entry->start));

			entry->needs_copy = FALSE;

			lock_write_to_read(&share_map->lock);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}
	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL) {

		if (lock_read_to_write(&share_map->lock)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    (vm_size_t) (entry->end - entry->start));
		entry->offset = 0;
		lock_write_to_read(&share_map->lock);
	}
	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*offset = (share_offset - entry->start) + entry->offset;
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.
	 */

	if (!su) {
		su = (share_map->ref_count == 1);
	}
	*out_prot = prot;
	*single_use = su;

	return (KERN_SUCCESS);

#undef RETURN
}
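
/*
 * Illustrative sketch (disabled): the vm_map_lookup()/vm_map_lookup_done()
 * pairing as a fault handler might use it.  Note that the map pointer is
 * in/out -- vm_map_lookup() may switch it to a submap -- and the same
 * (possibly changed) map and returned entry must be handed back to
 * vm_map_lookup_done().  The function name is made up for the example.
 */
#if 0
static int
vm_map_lookup_example(map, vaddr)
	vm_map_t map;
	vm_offset_t vaddr;
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired, su;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
	    &object, &offset, &prot, &wired, &su);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... resolve the fault using object/offset ... */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif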

/*
 * vm_map_lookup_done:
 *
 * Releases locks acquired by a vm_map_lookup
 * (according to the handle returned by that lookup).
 */

void
vm_map_lookup_done(map, entry)
	register vm_map_t map;
	vm_map_entry_t entry;
{
	/*
	 * If this entry references a map, unlock it first.
	 */

	if (entry->is_a_map)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 * Unlock the main-level map
	 */

	vm_map_unlock_read(map);
}

/*
 * Routine:	vm_map_simplify
 * Purpose:
 *	Attempt to simplify the map representation in
 *	the vicinity of the given starting address.
 * Note:
 *	This routine is intended primarily to keep the
 *	kernel maps more compact -- they generally don't
 *	benefit from the "expand a map entry" technology
 *	at allocation time because the adjacent entry
 *	is often wired down.
 */
void
vm_map_simplify(map, start)
	vm_map_t map;
	vm_offset_t start;
{
	vm_map_entry_t this_entry;
	vm_map_entry_t prev_entry;

	vm_map_lock(map);
	if (
	    (vm_map_lookup_entry(map, start, &this_entry)) &&
	    ((prev_entry = this_entry->prev) != &map->header) &&

	    (prev_entry->end == start) &&
	    (map->is_main_map) &&

	    (prev_entry->is_a_map == FALSE) &&
	    (prev_entry->is_sub_map == FALSE) &&

	    (this_entry->is_a_map == FALSE) &&
	    (this_entry->is_sub_map == FALSE) &&

	    (prev_entry->inheritance == this_entry->inheritance) &&
	    (prev_entry->protection == this_entry->protection) &&
	    (prev_entry->max_protection == this_entry->max_protection) &&
	    (prev_entry->wired_count == this_entry->wired_count) &&

	    (prev_entry->copy_on_write == this_entry->copy_on_write) &&
	    (prev_entry->needs_copy == this_entry->needs_copy) &&

	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
	    ((prev_entry->offset + (prev_entry->end - prev_entry->start))
		== this_entry->offset)
	    ) {
		if (map->first_free == this_entry)
			map->first_free = prev_entry;

		if (!this_entry->object.vm_object->paging_in_progress) {
			SAVE_HINT(map, prev_entry);
			vm_map_entry_unlink(map, this_entry);
			prev_entry->end = this_entry->end;
			vm_object_deallocate(this_entry->object.vm_object);
			vm_map_entry_dispose(map, this_entry);
		}
	}
	vm_map_unlock(map);
}
"Task" : "Share"), 2245 (int) map, (int) (map->pmap), map->ref_count, map->nentries, 2246 map->timestamp); 2247 2248 if (!full && indent) 2249 return; 2250 2251 indent += 2; 2252 for (entry = map->header.next; entry != &map->header; 2253 entry = entry->next) { 2254 iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", 2255 (int) entry, (int) entry->start, (int) entry->end); 2256 if (map->is_main_map) { 2257 static char *inheritance_name[4] = 2258 {"share", "copy", "none", "donate_copy"}; 2259 2260 printf("prot=%x/%x/%s, ", 2261 entry->protection, 2262 entry->max_protection, 2263 inheritance_name[entry->inheritance]); 2264 if (entry->wired_count != 0) 2265 printf("wired, "); 2266 } 2267 if (entry->is_a_map || entry->is_sub_map) { 2268 printf("share=0x%x, offset=0x%x\n", 2269 (int) entry->object.share_map, 2270 (int) entry->offset); 2271 if ((entry->prev == &map->header) || 2272 (!entry->prev->is_a_map) || 2273 (entry->prev->object.share_map != 2274 entry->object.share_map)) { 2275 indent += 2; 2276 vm_map_print((int)entry->object.share_map, 2277 full, 0, (char *)0); 2278 indent -= 2; 2279 } 2280 } else { 2281 printf("object=0x%x, offset=0x%x", 2282 (int) entry->object.vm_object, 2283 (int) entry->offset); 2284 if (entry->copy_on_write) 2285 printf(", copy (%s)", 2286 entry->needs_copy ? "needed" : "done"); 2287 printf("\n"); 2288 2289 if ((entry->prev == &map->header) || 2290 (entry->prev->is_a_map) || 2291 (entry->prev->object.vm_object != 2292 entry->object.vm_object)) { 2293 indent += 2; 2294 vm_object_print((int)entry->object.vm_object, 2295 full, 0, (char *)0); 2296 indent -= 2; 2297 } 2298 } 2299 } 2300 indent -= 2; 2301 } 2302 #endif 2303