/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
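
/*
 * Illustration, using hypothetical addresses A < B <= C < D: a map
 * covering [A,B) and [C,D) is a circular doubly-linked list rooted at
 * the header entry,
 *
 *	header <-> [A,B) <-> [C,D) <-> header
 *
 * and an operation on [start,end) with A < start < B first clips the
 * first entry at start:
 *
 *	header <-> [A,start) <-> [start,B) <-> [C,D) <-> header
 */
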
/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
static struct vm_map map_init[MAX_KMAP];

static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t));
static void vm_map_split __P((vm_map_entry_t));

void
vm_map_startup()
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
		map_init, MAX_KMAP);
	kmapentzone = &kmapentzone_store;
	zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
		kmap_entry_init, MAX_KMAPENT);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max)
	vm_offset_t min, max;
{
	struct vmspace *vm;

	vm = zalloc(vmspace_zone);
	vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	return (vm);
}

void
vm_init2(void) {
	zinitna(kmapentzone, &kmapentobj,
		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
	zinitna(mapentzone, &mapentobj,
		NULL, 0, 0, 0, 1);
	zinitna(mapzone, &mapobj,
		NULL, 0, 0, 0, 1);
	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
	pmap_init2();
	vm_object_init2();
}

void
vmspace_free(vm)
	struct vmspace *vm;
{

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0) {

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_lock(&vm->vm_map);
		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		    vm->vm_map.max_offset);
		vm_map_unlock(&vm->vm_map);

		pmap_release(vmspace_pmap(vm));
		vm_map_destroy(&vm->vm_map);
		zfree(vmspace_zone, vm);
	}
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap, min, max)
	pmap_t pmap;
	vm_offset_t min, max;
{
	vm_map_t result;

	result = zalloc(mapzone);
	vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}
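
/*
 * Usage sketch (illustrative only; base and size are hypothetical,
 * page-aligned values owned by the caller): a subsystem wanting a
 * map over [base, base + size) handled by an existing pmap might do
 *
 *	vm_map_t m;
 *
 *	m = vm_map_create(kernel_pmap, base, base + size);
 *
 * and later install it as a submap with vm_map_submap().
 */
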
/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(map, min, max)
	struct vm_map *map;
	vm_offset_t min, max;
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

void
vm_map_destroy(map)
	struct vm_map *map;
{
	lockdestroy(&map->lock);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(map)
	vm_map_t map;
{
	vm_map_entry_t new_entry;

	new_entry = zalloc((map->system_map || !mapentzone) ?
		kmapentzone : mapentzone);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t prev = entry->prev;
	vm_map_entry_t next = entry->next;

	next->prev = prev;
	prev->next = next;
	map->nentries--;
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 */
#define	SAVE_HINT(map,value) \
		(map)->hint = (value);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	vm_map_t map;
	vm_offset_t address;
	vm_map_entry_t *entry;	/* OUT */
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */

	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already looking
		 * at the entry we want (which is usually the case). Note also
		 * that we don't need to save the hint here... it is the same
		 * hint (unless we are at the header, in which case the hint
		 * didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */

	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */

				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
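
/*
 * Usage sketch (illustrative only; addr is a hypothetical address and
 * the caller is assumed to hold the map lock):
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry))
 *		...  addr lies within [entry->start, entry->end)  ...
 *	else
 *		...  entry precedes addr (it may be &map->header),
 *		     and the space at addr is currently a hole  ...
 */
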
/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */

	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */

	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 */
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		pmap_object_init_pt(map->pmap, start,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
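
/*
 * Usage sketch (illustrative only; map, object, start and end are
 * hypothetical): per the header comment above, the caller bumps the
 * object's reference count before the call, and so must drop it again
 * if the insertion fails:
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, object, 0, start, end,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 *
 * The map must be locked around the call.
 */
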
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	vm_map_t map;
	vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	vm_map_entry_t entry, next;
	vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
		}
	}
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result, s = 0;

	start = *addr;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			if (map == kmem_map || map == mb_map)
				splx(s);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset,
		start, start + length, prot, max, cow);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}
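
/*
 * Usage sketch (illustrative only; object, size and lower_bound are
 * hypothetical): with find_space == TRUE the address is first-fit
 * chosen at or above *addr, and on success *addr is updated:
 *
 *	vm_offset_t where = lower_bound;
 *
 *	rv = vm_map_find(map, object, 0, &where, size, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *
 * Unlike vm_map_insert(), vm_map_find() acquires the map lock itself.
 */
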
/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next);
		}
	}
}
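
/*
 * Merge example (hypothetical addresses): entries [A,B) and [B,C)
 * backed by the same object at offsets off and off + (B - A), with
 * identical eflags, protections, inheritance and wiring, become a
 * single entry [A,C) at offset off; entries differing in any of these
 * attributes are left untouched.
 */
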
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_offset_t start;
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
			atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */

#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_offset_t end;
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
			atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}
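
/*
 * Illustration (hypothetical addresses A < B < C < D): preparing an
 * entry [A,D) for an operation on [B,C) takes two clips,
 *
 *	vm_map_clip_start(map, entry, B);	entries: [A,B) [B,D)
 *	vm_map_clip_end(map, entry, C);		entries: [A,B) [B,C) [C,D)
 *
 * after which only the middle entry is modified.  Each piece keeps an
 * adjusted offset so it still maps the same backing pages.
 */
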
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *	vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(map, start, end, submap)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	vm_map_t submap;
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */

	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */

	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */

		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)

			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}

		vm_map_simplify_entry(map, current);

		current = current->next;
	}

	vm_map_unlock(map);
	return (KERN_SUCCESS);
}
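
/*
 * Usage sketch (illustrative only; start and end are hypothetical
 * page-aligned addresses): write-protecting a range while leaving the
 * maximum protection alone:
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * KERN_PROTECTION_FAILURE is returned if any entry's max_protection
 * lacks a requested bit.
 */
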
/*
 *	vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */

int
vm_map_madvise(map, start, end, behav)
	vm_map_t map;
	vm_offset_t start, end;
	int behav;
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */

	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		vm_map_lock_read(map);
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
					  pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				pmap_object_init_pt(
				    map->pmap,
				    useStart,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    0
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return (0);
}
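
/*
 * Usage sketch (illustrative only; start and end are hypothetical):
 * MADV_SEQUENTIAL is an entry-level advisory, so this call takes the
 * exclusive map lock and clips entries, whereas an object-level
 * advisory such as MADV_FREE would only read-lock the map:
 *
 *	(void) vm_map_madvise(map, start, end, MADV_SEQUENTIAL);
 */
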
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_inherit_t new_inheritance)
{
	vm_map_entry_t entry;
	vm_map_entry_t temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		vm_map_simplify_entry(map, entry);

		entry = entry->next;
	}

	vm_map_unlock(map);
	return (KERN_SUCCESS);
}

/*
 * Implement the semantics of mlock
 */
int
vm_map_user_pageable(map, start, end, new_pageable)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t new_pageable;
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t estart;
	int rv;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}

	if (new_pageable) {

		entry = start_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				vm_map_clip_end(map, entry, end);
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
				entry->wired_count--;
				if (entry->wired_count == 0)
					vm_fault_unwire(map, entry->start, entry->end);
			}
			vm_map_simplify_entry(map, entry);
			entry = entry->next;
		}
	} else {

		entry = start_entry;

		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				entry = entry->next;
				continue;
			}

			if (entry->wired_count != 0) {
				entry->wired_count++;
				entry->eflags |= MAP_ENTRY_USER_WIRED;
				entry = entry->next;
				continue;
			}

			/* Here on entry being newly wired */

			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

					vm_object_shadow(&entry->object.vm_object,
					    &entry->offset,
					    atop(entry->end - entry->start));
					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

				} else if (entry->object.vm_object == NULL &&
					   !map->system_map) {

					entry->object.vm_object =
					    vm_object_allocate(OBJT_DEFAULT,
						atop(entry->end - entry->start));
					entry->offset = (vm_offset_t) 0;

				}
			}

			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);

			entry->wired_count++;
			entry->eflags |= MAP_ENTRY_USER_WIRED;
			estart = entry->start;

			/* First we need to allow map modifications */
			vm_map_set_recursive(map);
			vm_map_lock_downgrade(map);
			map->timestamp++;

			rv = vm_fault_user_wire(map, entry->start, entry->end);
			if (rv) {

				entry->wired_count--;
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;

				vm_map_clear_recursive(map);
				vm_map_unlock(map);

				(void) vm_map_user_pageable(map, start, entry->start, TRUE);
				return rv;
			}

			vm_map_clear_recursive(map);
			if (vm_map_lock_upgrade(map)) {
				vm_map_lock(map);
				if (vm_map_lookup_entry(map, estart, &entry)
				    == FALSE) {
					vm_map_unlock(map);
					(void) vm_map_user_pageable(map,
								    start,
								    estart,
								    TRUE);
					return (KERN_INVALID_ADDRESS);
				}
			}
			vm_map_simplify_entry(map, entry);
		}
	}
	map->timestamp++;
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}
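
/*
 * Usage sketch (illustrative only; start and end are hypothetical
 * page-aligned user addresses): wiring a range mlock(2)-style and
 * unwiring it again; new_pageable == FALSE wires, TRUE unwires:
 *
 *	rv = vm_map_user_pageable(map, start, end, FALSE);
 *	...
 *	rv = vm_map_user_pageable(map, start, end, TRUE);
 */
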
/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t new_pageable;
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t failed = 0;
	int rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 * Only one pageability change may take place at one time, since
	 * vm_fault assumes it will be called only once for each
	 * wiring/unwiring.  Therefore, we have to make sure we're actually
	 * changing the pageability for the entire region.  We do so before
	 * making any changes.
	 */

	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return (KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 * Actions are rather different for wiring and unwiring, so we have
	 * two separate cases.
	 */

	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 * Unwiring.  First ensure that the range to be unwired is
		 * really wired down and that there are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

			if (entry->wired_count == 0 ||
			    (entry->end < end &&
			     (entry->next == &map->header ||
			      entry->next->start > entry->end))) {
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region. If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_map_clip_end(map, entry, end);

			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);

			vm_map_simplify_entry(map, entry);

			entry = entry->next;
		}
	} else {
		/*
		 * Wiring.  We must do this in two passes:
		 *
		 * 1. Holding the write lock, we create any shadow or zero-fill
		 * objects that need to be created. Then we clip each map
		 * entry to the region to be wired and increment its wiring
		 * count.  We create objects before clipping the map entries
		 * to avoid object proliferation.
		 *
		 * 2. We downgrade to a read lock, and call vm_fault_wire to
		 * fault in the pages for any newly wired area (wired_count is
		 * 1).
		 *
		 * Downgrading to a read lock for vm_fault_wire avoids a possible
		 * deadlock with another process that may have faulted on one
		 * of the pages to be wired (it would mark the page busy,
		 * blocking us, then in turn block on the map lock that we
		 * hold).  Because of problems in the recursive lock package,
		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
		 * any actions that require the write lock must be done
		 * beforehand.  Because we keep the read lock on the map, the
		 * copy-on-write status of the entries we modify here cannot
		 * change.
		 */

		/*
		 * Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {

				/*
				 * Perform actions of vm_map_lookup that need
				 * the write lock on the map: create a shadow
				 * object for a copy-on-write region, or an
				 * object for a zero-fill region.
				 *
				 * We don't have to do this for entries that
				 * point to sub maps, because we won't
				 * hold the lock on the sub map.
				 */
				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
					if (copyflag &&
					    ((entry->protection & VM_PROT_WRITE) != 0)) {

						vm_object_shadow(&entry->object.vm_object,
						    &entry->offset,
						    atop(entry->end - entry->start));
						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
					} else if (entry->object.vm_object == NULL &&
						   !map->system_map) {
						entry->object.vm_object =
						    vm_object_allocate(OBJT_DEFAULT,
							atop(entry->end - entry->start));
						entry->offset = (vm_offset_t) 0;
					}
				}
			}
			vm_map_clip_start(map, entry, start);
			vm_map_clip_end(map, entry, end);
			entry->wired_count++;

			/*
			 * Check for holes
			 */
			if (entry->end < end &&
			    (entry->next == &map->header ||
			     entry->next->start > entry->end)) {
				/*
				 * Found one.  Object creation actions do not
				 * need to be undone, but the wired counts
				 * need to be restored.
				 */
				while (entry != &map->header && entry->end > start) {
					entry->wired_count--;
					entry = entry->prev;
				}
				vm_map_unlock(map);
				return (KERN_INVALID_ARGUMENT);
			}
			entry = entry->next;
		}

		/*
		 * Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel is well-behaved, and therefore will not do
		 * anything destructive to this region of the map while
		 * we have it unlocked.  We cannot trust user processes
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_unlock(map);	/* trust me ... */
		} else {
			vm_map_lock_downgrade(map);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 *
			 * XXX this violates the locking protocol on the map,
			 * needs to be fixed.
			 */
			if (rv)
				entry->wired_count--;
			else if (entry->wired_count == 1) {
				rv = vm_fault_wire(map, entry->start, entry->end);
				if (rv) {
					failed = entry->start;
					entry->wired_count--;
				}
			}
			entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
			vm_map_lock(map);
		}
		if (rv) {
			vm_map_unlock(map);
			(void) vm_map_pageable(map, start, failed, TRUE);
			return (rv);
		}
		vm_map_simplify_entry(map, start_entry);
	}

	vm_map_unlock(map);

	return (KERN_SUCCESS);
}
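
/*
 * Usage sketch (illustrative only; start and end are hypothetical):
 * wiring down a range of the kernel map so it can be touched without
 * faulting:
 *
 *	rv = vm_map_pageable(kernel_map, start, end, FALSE);
 *
 * As above, FALSE wires and TRUE unwires; unlike
 * vm_map_user_pageable(), this interface does not mark the entries
 * MAP_ENTRY_USER_WIRED.
 */
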
/*
 *	vm_map_clean
 *
 *	Push any dirty cached pages in the address range to their pager.
 *	If syncio is TRUE, dirty pages are written synchronously.
 *	If invalidate is TRUE, any cached pages are freed as well.
 *
 *	Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		     current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate)
		pmap_remove(vm_map_pmap(map), start, end);
	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		/*
		 * Note that there is absolutely no sense in writing out
		 * anonymous objects, so we track down the vnode object
		 * to write out.
		 * We invalidate (remove) all pages from the address space
		 * anyway, for semantic correctness.
		 *
		 * Note: an entry may have no object at all (e.g. certain
		 * anonymous maps), so guard against a NULL object here.
		 */
		while (object && object->backing_object) {
			object = object->backing_object;
			offset += object->backing_object_offset;
			if (object->size < OFF_TO_IDX(offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
		if (object && (object->type == OBJT_VNODE) &&
		    (current->protection & VM_PROT_WRITE)) {
			/*
			 * Flush pages if writing is allowed, invalidate them
			 * if invalidation requested.  Pages undergoing I/O
			 * will be ignored by vm_object_page_remove().
			 *
			 * We cannot lock the vnode and then wait for paging
			 * to complete without deadlocking against vm_fault.
			 * Instead we simply call vm_object_page_remove() and
			 * allow it to block internally on a page-by-page
			 * basis when it encounters pages undergoing async
			 * I/O.
			 */
			int flags;

			vm_object_reference(object);
			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? OBJPC_INVAL : 0;
			vm_object_page_clean(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + size + PAGE_MASK),
			    flags);
			if (invalidate) {
				/*vm_object_pip_wait(object, "objmcl");*/
				vm_object_page_remove(object,
				    OFF_TO_IDX(offset),
				    OFF_TO_IDX(offset + size + PAGE_MASK),
				    FALSE);
			}
			VOP_UNLOCK(object->handle, 0, curproc);
			vm_object_deallocate(object);
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}
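
/*
 * Usage sketch (illustrative only; start and end are hypothetical):
 * a synchronous flush-and-invalidate of a range, the sort of request
 * msync(2) generates:
 *
 *	rv = vm_map_clean(map, start, end, TRUE, TRUE);
 *
 * Only vnode-backed pages are written out; anonymous memory has no
 * backing store to clean, as the second pass above notes.
 */
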
/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_deallocate(entry->object.vm_object);
	}

	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 */
int
vm_map_delete(map, start, end)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
		/*
		 * Fix the lookup hint now, rather than each time through the
		 * loop.
		 */
		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start) {
		map->first_free = entry->prev;
	}

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_pindex_t offidxstart, offidxend, count;

		vm_map_clip_end(map, entry, end);

		s = entry->start;
		e = entry->end;
		next = entry->next;

		offidxstart = OFF_TO_IDX(entry->offset);
		count = OFF_TO_IDX(e - s);
		object = entry->object.vm_object;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		offidxend = offidxstart + count;

		if ((object == kernel_object) || (object == kmem_object)) {
			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
		} else {
			pmap_remove(map->pmap, s, e);
			if (object != NULL &&
			    object->ref_count != 1 &&
			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
				vm_object_collapse(object);
				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
				if (object->type == OBJT_SWAP) {
					swap_pager_freespace(object, offidxstart, count);
				}
				if (offidxend >= object->size &&
				    offidxstart < object->size) {
					object->size = offidxstart;
				}
			}
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(map, start, end)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
{
	int result, s = 0;

	if (map == kmem_map || map == mb_map)
		s = splvm();

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	if (map == kmem_map || map == mb_map)
		splx(s);

	return (result);
}

/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
			vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}
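
/*
 * Usage sketch (illustrative only; start and end are hypothetical):
 * verifying that an entire range is mapped readable before operating
 * on it:
 *
 *	if (!vm_map_check_protection(map, start, end, VM_PROT_READ))
 *		return (EFAULT);
 */
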
/*
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	being a negative impact on memory usage.
 */
static void
vm_map_split(entry)
	vm_map_entry_t entry;
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend, idx;
	vm_size_t size;
	vm_ooffset_t offset;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	new_object = vm_pager_allocate(orig_object->type,
		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	source = orig_object->backing_object;
	if (source != NULL) {
		vm_object_reference(source);	/* Referenced by new_object */
		TAILQ_INSERT_TAIL(&source->shadow_head,
				  new_object, shadow_list);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		new_object->backing_object_offset =
			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
		new_object->backing_object = source;
		source->shadow_count++;
		source->generation++;
	}

	for (idx = 0; idx < size; idx++) {
		vm_page_t m;

	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
			goto retry;

		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}

	if (orig_object->type == OBJT_SWAP) {
		vm_object_pip_add(orig_object, 1);
		/*
		 * copy orig_object pages into new_object
		 * and destroy unneeded pages in
		 * shadow object.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		vm_object_pip_wakeup(orig_object);
	}

	for (idx = 0; idx < size; idx++) {
		m = vm_page_lookup(new_object, idx);
		if (m) {
			vm_page_wakeup(m);
		}
	}

	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}
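
/*
 * Illustration (explanatory sketch): when several references share one
 * anonymous object, vm_map_split() re-homes the pages backing just
 * [entry->start, entry->end) into a fresh object of exactly that size,
 * so a later removal of this range can free those pages without
 * walking, or disturbing pages of, the larger shared object.
 */
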
/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
    vm_map_t src_map, dst_map;
    vm_map_entry_t src_entry, dst_entry;
{
    vm_object_t src_object;

    if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
        return;

    if (src_entry->wired_count == 0) {
        /*
         * If the source entry is marked needs_copy, it is already
         * write-protected.
         */
        if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
            pmap_protect(src_map->pmap,
                src_entry->start,
                src_entry->end,
                src_entry->protection & ~VM_PROT_WRITE);
        }

        /*
         * Make a copy of the object.
         */
        if ((src_object = src_entry->object.vm_object) != NULL) {
            if ((src_object->handle == NULL) &&
                (src_object->type == OBJT_DEFAULT ||
                 src_object->type == OBJT_SWAP)) {
                vm_object_collapse(src_object);
                if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
                    vm_map_split(src_entry);
                    src_object = src_entry->object.vm_object;
                }
            }

            vm_object_reference(src_object);
            vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
            dst_entry->object.vm_object = src_object;
            src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
            dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
            dst_entry->offset = src_entry->offset;
        } else {
            dst_entry->object.vm_object = NULL;
            dst_entry->offset = 0;
        }

        pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
            dst_entry->end - dst_entry->start, src_entry->start);
    } else {
        /*
         * Wired-down pages cannot be made copy-on-write.  Instead,
         * cause the wired pages to be copied into the new map by
         * simulating faults (the new pages are pageable).
         */
        vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
    }
}
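/*
 * After vm_map_copy_entry() returns for an unwired entry with a backing
 * object, both entries reference the same object and both are flagged
 * for copy-on-write, so each side will fault before its first write.
 * A sketch of the resulting invariants, phrased as assertions over the
 * src_entry/dst_entry pair passed to the function above (illustrative
 * only, not compiled):
 */
#if 0
    /* Both entries share one object reference... */
    KASSERT(dst_entry->object.vm_object == src_entry->object.vm_object,
        ("copy_entry: objects differ"));
    /* ...and both must fault before their first write. */
    KASSERT((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) &&
        (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY),
        ("copy_entry: missing needs-copy"));
#endif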
/*
 * vmspace_fork:
 *
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
    struct vmspace *vm1;
{
    struct vmspace *vm2;
    vm_map_t old_map = &vm1->vm_map;
    vm_map_t new_map;
    vm_map_entry_t old_entry;
    vm_map_entry_t new_entry;
    vm_object_t object;

    vm_map_lock(old_map);

    vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
    bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
        (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
    new_map = &vm2->vm_map;     /* XXX */
    new_map->timestamp = 1;

    old_entry = old_map->header.next;

    while (old_entry != &old_map->header) {
        if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
            panic("vm_map_fork: encountered a submap");

        switch (old_entry->inheritance) {
        case VM_INHERIT_NONE:
            break;

        case VM_INHERIT_SHARE:
            /*
             * Clone the entry, creating the shared object if
             * necessary.
             */
            object = old_entry->object.vm_object;
            if (object == NULL) {
                object = vm_object_allocate(OBJT_DEFAULT,
                    atop(old_entry->end - old_entry->start));
                old_entry->object.vm_object = object;
                old_entry->offset = (vm_offset_t) 0;
            }

            /*
             * Add the reference before calling vm_object_shadow
             * to ensure that a shadow object is created.
             */
            vm_object_reference(object);
            if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                vm_object_shadow(&old_entry->object.vm_object,
                    &old_entry->offset,
                    atop(old_entry->end - old_entry->start));
                old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                object = old_entry->object.vm_object;
            }
            vm_object_clear_flag(object, OBJ_ONEMAPPING);

            /*
             * Clone the entry, referencing the shared object.
             */
            new_entry = vm_map_entry_create(new_map);
            *new_entry = *old_entry;
            new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
            new_entry->wired_count = 0;

            /*
             * Insert the entry into the new map -- we know we're
             * inserting at the end of the new map.
             */
            vm_map_entry_link(new_map, new_map->header.prev,
                new_entry);

            /*
             * Update the physical map.
             */
            pmap_copy(new_map->pmap, old_map->pmap,
                new_entry->start,
                (old_entry->end - old_entry->start),
                old_entry->start);
            break;

        case VM_INHERIT_COPY:
            /*
             * Clone the entry and link into the map.
             */
            new_entry = vm_map_entry_create(new_map);
            *new_entry = *old_entry;
            new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
            new_entry->wired_count = 0;
            new_entry->object.vm_object = NULL;
            vm_map_entry_link(new_map, new_map->header.prev,
                new_entry);
            vm_map_copy_entry(old_map, new_map, old_entry,
                new_entry);
            break;
        }
        old_entry = old_entry->next;
    }

    new_map->size = old_map->size;
    vm_map_unlock(old_map);

    return (vm2);
}

int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_prot_t prot, vm_prot_t max, int cow)
{
    vm_map_entry_t prev_entry;
    vm_map_entry_t new_stack_entry;
    vm_size_t init_ssize;
    int rv;

    if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
        return (KERN_NO_SPACE);

    if (max_ssize < SGROWSIZ)
        init_ssize = max_ssize;
    else
        init_ssize = SGROWSIZ;

    vm_map_lock(map);

    /* If addr is already mapped, no go. */
    if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
        vm_map_unlock(map);
        return (KERN_NO_SPACE);
    }

    /*
     * If we can't accommodate max_ssize in the current mapping,
     * no go.  However, we need to be aware that subsequent user
     * mappings might map into the space we have reserved for the
     * stack, and currently this space is not protected.
     *
     * Hopefully we will at least detect this condition
     * when we try to grow the stack.
     */
    if ((prev_entry->next != &map->header) &&
        (prev_entry->next->start < addrbos + max_ssize)) {
        vm_map_unlock(map);
        return (KERN_NO_SPACE);
    }

    /*
     * We initially map a stack of only init_ssize.  We will
     * grow as needed later.  Since this is to be a grow-down
     * stack, we map at the top of the range.
     *
     * Note: we would normally expect prot and max to be
     * VM_PROT_ALL, and cow to be 0.  Possibly we should
     * eliminate these as input parameters, and just
     * pass these values here in the insert call.
     */
    rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
        addrbos + max_ssize, prot, max, cow);

    /* Now set the avail_ssize amount. */
    if (rv == KERN_SUCCESS) {
        if (prev_entry != &map->header)
            vm_map_clip_end(map, prev_entry,
                addrbos + max_ssize - init_ssize);
        new_stack_entry = prev_entry->next;
        if (new_stack_entry->end != addrbos + max_ssize ||
            new_stack_entry->start != addrbos + max_ssize - init_ssize)
            panic("Bad entry start/end for new stack entry");
        else
            new_stack_entry->avail_ssize = max_ssize - init_ssize;
    }

    vm_map_unlock(map);
    return (rv);
}
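/*
 * Worked example of the layout vm_map_stack() creates.  The numbers are
 * assumptions for illustration (SGROWSIZ of 128K, a 1M reservation):
 * only the top SGROWSIZ bytes are mapped up front, and the remainder is
 * recorded in avail_ssize for later growth.  Illustrative only, not
 * compiled:
 */
#if 0
    vm_offset_t addrbos = 0xbfb00000;   /* bottom of the stack range */
    vm_size_t max_ssize = 0x100000;     /* 1M reservation */
    vm_size_t init_ssize = 0x20000;     /* SGROWSIZ, assumed 128K */

    /*
     * Mapped entry: [addrbos + max_ssize - init_ssize, addrbos + max_ssize)
     *             = [0xbfbe0000, 0xbfc00000)
     * avail_ssize = max_ssize - init_ssize = 0xe0000 (896K), still
     * growable downward toward addrbos.
     */
#endif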
/*
 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack(struct proc *p, vm_offset_t addr)
{
    vm_map_entry_t prev_entry;
    vm_map_entry_t stack_entry;
    vm_map_entry_t new_stack_entry;
    struct vmspace *vm = p->p_vmspace;
    vm_map_t map = &vm->vm_map;
    vm_offset_t end;
    int grow_amount;
    int rv;
    int is_procstack;

Retry:
    vm_map_lock_read(map);

    /* If addr is already in the entry range, no need to grow. */
    if (vm_map_lookup_entry(map, addr, &prev_entry)) {
        vm_map_unlock_read(map);
        return (KERN_SUCCESS);
    }

    if ((stack_entry = prev_entry->next) == &map->header) {
        vm_map_unlock_read(map);
        return (KERN_SUCCESS);
    }
    if (prev_entry == &map->header)
        end = stack_entry->start - stack_entry->avail_ssize;
    else
        end = prev_entry->end;

    /*
     * This next test mimics the old grow function in vm_machdep.c.
     * It really doesn't quite make sense, but we do it anyway
     * for compatibility.
     *
     * If the stack is not growable, return success.  This signals
     * the caller to proceed as it normally would with ordinary vm.
     */
    if (stack_entry->avail_ssize < 1 ||
        addr >= stack_entry->start ||
        addr < stack_entry->start - stack_entry->avail_ssize) {
        vm_map_unlock_read(map);
        return (KERN_SUCCESS);
    }

    /* Find the minimum grow amount. */
    grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
    if (grow_amount > stack_entry->avail_ssize) {
        vm_map_unlock_read(map);
        return (KERN_NO_SPACE);
    }

    /*
     * If there is no longer enough space between the entries, fail
     * and adjust the available space.  Note: this should only happen
     * if the user has mapped into the stack area after the stack was
     * created, and is probably an error.
     *
     * This also effectively destroys any guard page the user
     * might have intended by limiting the stack size.
     */
    if (grow_amount > stack_entry->start - end) {
        if (vm_map_lock_upgrade(map))
            goto Retry;

        stack_entry->avail_ssize = stack_entry->start - end;

        vm_map_unlock(map);
        return (KERN_NO_SPACE);
    }

    is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

    /*
     * If this is the main process stack, see if we're over the
     * stack limit.
     */
    if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
        p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
        vm_map_unlock_read(map);
        return (KERN_NO_SPACE);
    }

    /* Round up the grow amount modulo SGROWSIZ. */
    grow_amount = roundup(grow_amount, SGROWSIZ);
    if (grow_amount > stack_entry->avail_ssize) {
        grow_amount = stack_entry->avail_ssize;
    }
    if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
        p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
        grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
            ctob(vm->vm_ssize);
    }

    if (vm_map_lock_upgrade(map))
        goto Retry;

    /* Get the preliminary new entry start value. */
    addr = stack_entry->start - grow_amount;

    /*
     * If this puts us into the previous entry, cut back our growth
     * to the available space.  Also, see the note above.
     */
    if (addr < end) {
        stack_entry->avail_ssize = stack_entry->start - end;
        addr = end;
    }

    rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
        VM_PROT_ALL,
        VM_PROT_ALL,
        0);

    /* Adjust the available stack space by the amount we grew. */
    if (rv == KERN_SUCCESS) {
        if (prev_entry != &map->header)
            vm_map_clip_end(map, prev_entry, addr);
        new_stack_entry = prev_entry->next;
        if (new_stack_entry->end != stack_entry->start ||
            new_stack_entry->start != addr)
            panic("Bad stack grow start/end in new stack entry");
        else {
            new_stack_entry->avail_ssize = stack_entry->avail_ssize -
                (new_stack_entry->end - new_stack_entry->start);
            if (is_procstack)
                vm->vm_ssize += btoc(new_stack_entry->end -
                    new_stack_entry->start);
        }
    }

    vm_map_unlock(map);
    return (rv);
}
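/*
 * The growth sizing above proceeds in two rounding steps: the
 * fault-driven minimum is rounded up to a page, then up to SGROWSIZ
 * chunks, and finally clamped by avail_ssize and RLIMIT_STACK.  A
 * worked example with assumed values (4K pages, 128K SGROWSIZ);
 * illustrative only, not compiled:
 */
#if 0
    int grow_amount;

    /* Fault 0x1234 bytes below stack_entry->start: */
    grow_amount = roundup(0x1234, PAGE_SIZE);   /* 0x2000, two pages */
    grow_amount = roundup(grow_amount, SGROWSIZ); /* 0x20000, one 128K chunk */
    /* ...then clamped to avail_ssize and to what RLIMIT_STACK allows. */
#endif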
/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is empty.
 */
void
vmspace_exec(struct proc *p)
{
    struct vmspace *oldvmspace = p->p_vmspace;
    struct vmspace *newvmspace;
    vm_map_t map = &p->p_vmspace->vm_map;

    newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
    bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
        (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
    /*
     * This code is written like this for prototype purposes.  The
     * goal is to avoid running down the vmspace here, but to let
     * the other processes that are still using the vmspace run it
     * down eventually.  Even though there is little or no chance of
     * blocking here, it is a good idea to keep this form for future
     * mods.
     */
    vmspace_free(oldvmspace);
    p->p_vmspace = newvmspace;
    pmap_pinit2(vmspace_pmap(newvmspace));
    if (p == curproc)
        pmap_activate(p);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
void
vmspace_unshare(struct proc *p)
{
    struct vmspace *oldvmspace = p->p_vmspace;
    struct vmspace *newvmspace;

    if (oldvmspace->vm_refcnt == 1)
        return;
    newvmspace = vmspace_fork(oldvmspace);
    vmspace_free(oldvmspace);
    p->p_vmspace = newvmspace;
    pmap_pinit2(vmspace_pmap(newvmspace));
    if (p == curproc)
        pmap_activate(p);
}
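/*
 * Sketch of how the exec path is expected to use vmspace_exec(): any
 * shared address space is dropped before the new image is mapped in.
 * The surrounding function is hypothetical shorthand for the real exec
 * code; illustrative only, not compiled:
 */
#if 0
static void
example_exec_new_vmspace(struct proc *p)
{
    /*
     * Give p a private, empty vmspace; the old one is released when
     * its last sharer drops it.
     */
    vmspace_exec(p);
    /* ...then map the new text/data/stack into p->p_vmspace->vm_map... */
}
#endif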
/*
 * vm_map_lookup:
 *
 * Finds the VM object, offset, and
 * protection for a given virtual address in the
 * specified map, assuming a page fault of the
 * type specified.
 *
 * Leaves the map in question locked for read; return
 * values are guaranteed until a vm_map_lookup_done
 * call is performed.  Note that the map argument
 * is in/out; the returned map must be used in
 * the call to vm_map_lookup_done.
 *
 * A handle (out_entry) is returned for use in
 * vm_map_lookup_done, to make that fast.
 *
 * If a lookup is requested with "write protection"
 * specified, the map may be changed to perform virtual
 * copying operations, although the data referenced will
 * remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,        /* IN/OUT */
    vm_offset_t vaddr,
    vm_prot_t fault_typea,
    vm_map_entry_t *out_entry,          /* OUT */
    vm_object_t *object,                /* OUT */
    vm_pindex_t *pindex,                /* OUT */
    vm_prot_t *out_prot,                /* OUT */
    boolean_t *wired)                   /* OUT */
{
    vm_map_entry_t entry;
    vm_map_t map = *var_map;
    vm_prot_t prot;
    vm_prot_t fault_type = fault_typea;

RetryLookup:;

    /*
     * Lookup the faulting address.
     */
    vm_map_lock_read(map);

#define RETURN(why) \
    { \
    vm_map_unlock_read(map); \
    return (why); \
    }

    /*
     * If the map has an interesting hint, try it before calling the
     * full-blown lookup routine.
     */
    entry = map->hint;

    *out_entry = entry;

    if ((entry == &map->header) ||
        (vaddr < entry->start) || (vaddr >= entry->end)) {
        vm_map_entry_t tmp_entry;

        /*
         * Entry was either not a valid hint, or the vaddr was not
         * contained in the entry, so do a full lookup.
         */
        if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
            RETURN(KERN_INVALID_ADDRESS);

        entry = tmp_entry;
        *out_entry = entry;
    }

    /*
     * Handle submaps.
     */
    if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
        vm_map_t old_map = map;

        *var_map = map = entry->object.sub_map;
        vm_map_unlock_read(old_map);
        goto RetryLookup;
    }

    /*
     * Check whether this task is allowed to have this page.
     * Note the special case for MAP_ENTRY_COW
     * pages with an override.  This is to implement a forced
     * COW for debuggers.
     */
    if (fault_type & VM_PROT_OVERRIDE_WRITE)
        prot = entry->max_protection;
    else
        prot = entry->protection;

    fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
    if ((fault_type & prot) != fault_type) {
        RETURN(KERN_PROTECTION_FAILURE);
    }

    if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
        (entry->eflags & MAP_ENTRY_COW) &&
        (fault_type & VM_PROT_WRITE) &&
        (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
        RETURN(KERN_PROTECTION_FAILURE);
    }

    /*
     * If this page is not pageable, we have to get it for all possible
     * accesses.
     */
    *wired = (entry->wired_count != 0);
    if (*wired)
        prot = fault_type = entry->protection;

    /*
     * If the entry was copy-on-write, we either make a new object now
     * or demote the permissions allowed.
     */
    if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
        /*
         * If we want to write the page, we may as well handle that
         * now since we've got the map locked.
         *
         * If we don't need to write the page, we just demote the
         * permissions allowed.
         */
        if (fault_type & VM_PROT_WRITE) {
            /*
             * Make a new object, and place it in the object
             * chain.  Note that no new references have appeared
             * -- one just moved from the map to the new
             * object.
             */
            if (vm_map_lock_upgrade(map))
                goto RetryLookup;

            vm_object_shadow(
                &entry->object.vm_object,
                &entry->offset,
                atop(entry->end - entry->start));

            entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
            vm_map_lock_downgrade(map);
        } else {
            /*
             * We're attempting to read a copy-on-write page --
             * don't allow writes.
             */
            prot &= ~VM_PROT_WRITE;
        }
    }

    /*
     * Create an object if necessary.
     */
    if (entry->object.vm_object == NULL &&
        !map->system_map) {
        if (vm_map_lock_upgrade(map))
            goto RetryLookup;

        entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
            atop(entry->end - entry->start));
        entry->offset = 0;
        vm_map_lock_downgrade(map);
    }

    /*
     * Return the object/offset from this entry.  If the entry was
     * copy-on-write or empty, it has been fixed up.
     */
    *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
    *object = entry->object.vm_object;

    /*
     * Return whether this is the only map sharing this data.
     */
    *out_prot = prot;
    return (KERN_SUCCESS);

#undef RETURN
}
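/*
 * The vm_map_lock_upgrade()/RetryLookup dance above is the standard
 * read-to-write upgrade idiom in this file: a nonzero return means the
 * upgrade failed and the read lock was lost, so the whole lookup must
 * be restarted rather than resumed.  Skeleton of the pattern; the
 * "need_to_modify_map" predicate is a hypothetical stand-in
 * (illustrative only, not compiled):
 */
#if 0
retry:
    vm_map_lock_read(map);
    /* ... read-only checks ... */
    if (need_to_modify_map) {
        if (vm_map_lock_upgrade(map))   /* nonzero: lock was lost */
            goto retry;
        /* ... modify the map under the write lock ... */
        vm_map_lock_downgrade(map);
    }
    vm_map_unlock_read(map);
#endif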
/*
 * vm_map_lookup_done:
 *
 * Releases locks acquired by a vm_map_lookup
 * (according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(map, entry)
    vm_map_t map;
    vm_map_entry_t entry;
{
    /*
     * Unlock the main-level map.
     */
    vm_map_unlock_read(map);
}
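/*
 * vm_map_lookup() and vm_map_lookup_done() are used as a bracket pair,
 * typically from the fault path.  A minimal sketch of a caller; the
 * helper name is hypothetical and error handling is elided
 * (illustrative only, not compiled):
 */
#if 0
static int
example_resolve_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type)
{
    vm_map_entry_t entry;
    vm_object_t object;
    vm_pindex_t pindex;
    vm_prot_t prot;
    boolean_t wired;
    int rv;

    /* On success the (possibly different) map is left read-locked. */
    rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
        &pindex, &prot, &wired);
    if (rv != KERN_SUCCESS)
        return (rv);

    /* ... fault in the page at object/pindex here ... */

    vm_map_lookup_done(map, entry);     /* drop the read lock */
    return (KERN_SUCCESS);
}
#endif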
/*
 * Implement uiomove with VM operations.  This handles (and its
 * collateral changes support) every combination of source object
 * modification and COW type operation.
 */
int
vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
    vm_map_t mapa;
    vm_object_t srcobject;
    off_t cp;
    int cnta;
    vm_offset_t uaddra;
    int *npages;
{
    vm_map_t map;
    vm_object_t first_object, oldobject, object;
    vm_map_entry_t entry;
    vm_prot_t prot;
    boolean_t wired;
    int tcnt, rv;
    vm_offset_t uaddr, start, end, tend;
    vm_pindex_t first_pindex, osize, oindex;
    off_t ooffset;
    int cnt;

    if (npages)
        *npages = 0;

    cnt = cnta;
    uaddr = uaddra;

    while (cnt > 0) {
        map = mapa;

        if ((vm_map_lookup(&map, uaddr,
            VM_PROT_READ, &entry, &first_object,
            &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
            return EFAULT;
        }

        vm_map_clip_start(map, entry, uaddr);

        tcnt = cnt;
        tend = uaddr + tcnt;
        if (tend > entry->end) {
            tcnt = entry->end - uaddr;
            tend = entry->end;
        }

        vm_map_clip_end(map, entry, tend);

        start = entry->start;
        end = entry->end;

        osize = atop(tcnt);

        oindex = OFF_TO_IDX(cp);
        if (npages) {
            vm_pindex_t idx;

            for (idx = 0; idx < osize; idx++) {
                vm_page_t m;

                if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
                    vm_map_lookup_done(map, entry);
                    return 0;
                }
                /*
                 * Disallow busy or invalid pages, but allow
                 * m->busy pages if they are entirely valid.
                 */
                if ((m->flags & PG_BUSY) ||
                    ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
                    vm_map_lookup_done(map, entry);
                    return 0;
                }
            }
        }

        /*
         * If we are changing an existing map entry, just redirect
         * the object, and change mappings.
         */
        if ((first_object->type == OBJT_VNODE) &&
            ((oldobject = entry->object.vm_object) == first_object)) {
            if ((entry->offset != cp) || (oldobject != srcobject)) {
                /*
                 * Remove old window into the file.
                 */
                pmap_remove(map->pmap, uaddr, tend);

                /*
                 * Force copy-on-write for mmapped regions.
                 */
                vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);

                /*
                 * Point the object appropriately.
                 */
                if (oldobject != srcobject) {
                    /*
                     * Set the object optimization hint flag.
                     */
                    vm_object_set_flag(srcobject, OBJ_OPT);
                    vm_object_reference(srcobject);
                    entry->object.vm_object = srcobject;

                    if (oldobject) {
                        vm_object_deallocate(oldobject);
                    }
                }

                entry->offset = cp;
                map->timestamp++;
            } else {
                pmap_remove(map->pmap, uaddr, tend);
            }
        } else if ((first_object->ref_count == 1) &&
            (first_object->size == osize) &&
            ((first_object->type == OBJT_DEFAULT) ||
             (first_object->type == OBJT_SWAP))) {
            oldobject = first_object->backing_object;

            if ((first_object->backing_object_offset != cp) ||
                (oldobject != srcobject)) {
                /*
                 * Remove old window into the file.
                 */
                pmap_remove(map->pmap, uaddr, tend);

                /*
                 * Remove unneeded old pages.
                 */
                vm_object_page_remove(first_object, 0, 0, 0);

                /*
                 * Invalidate swap space.
                 */
                if (first_object->type == OBJT_SWAP) {
                    swap_pager_freespace(first_object,
                        0,
                        first_object->size);
                }

                /*
                 * Force copy-on-write for mmapped regions.
                 */
                vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);

                /*
                 * Point the object appropriately.
                 */
                if (oldobject != srcobject) {
                    /*
                     * Set the object optimization hint flag.
                     */
                    vm_object_set_flag(srcobject, OBJ_OPT);
                    vm_object_reference(srcobject);

                    if (oldobject) {
                        TAILQ_REMOVE(&oldobject->shadow_head,
                            first_object, shadow_list);
                        oldobject->shadow_count--;
                        /* XXX bump generation? */
                        vm_object_deallocate(oldobject);
                    }

                    TAILQ_INSERT_TAIL(&srcobject->shadow_head,
                        first_object, shadow_list);
                    srcobject->shadow_count++;
                    /* XXX bump generation? */

                    first_object->backing_object = srcobject;
                }
                first_object->backing_object_offset = cp;
                map->timestamp++;
            } else {
                pmap_remove(map->pmap, uaddr, tend);
            }
        /*
         * Otherwise, we have to do a logical mmap.
         */
        } else {
            vm_object_set_flag(srcobject, OBJ_OPT);
            vm_object_reference(srcobject);

            pmap_remove(map->pmap, uaddr, tend);

            vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
            vm_map_lock_upgrade(map);

            if (entry == &map->header) {
                map->first_free = &map->header;
            } else if (map->first_free->start >= start) {
                map->first_free = entry->prev;
            }

            SAVE_HINT(map, entry->prev);
            vm_map_entry_delete(map, entry);

            object = srcobject;
            ooffset = cp;

            rv = vm_map_insert(map, object, ooffset, start, tend,
                VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

            if (rv != KERN_SUCCESS)
                panic("vm_uiomove: could not insert new entry: %d", rv);
        }

        /*
         * Map the window directly, if it is already in memory.
         */
        pmap_object_init_pt(map->pmap, uaddr,
            srcobject, oindex, tcnt, 0);

        map->timestamp++;
        vm_map_unlock(map);

        cnt -= tcnt;
        uaddr += tcnt;
        cp += tcnt;
        if (npages)
            *npages += osize;
    }
    return 0;
}
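/*
 * Sketch of how vm_uiomove() is meant to be driven: map "cnt" bytes of
 * srcobject at file offset "cp" into the user window at "uaddr" instead
 * of copying them, falling back to an ordinary copying uiomove() on
 * failure.  The wrapper and its parameters are hypothetical;
 * illustrative only, not compiled:
 */
#if 0
static int
example_window_write(struct proc *p, vm_object_t srcobject, off_t cp,
    int cnt, vm_offset_t uaddr)
{
    int npages;

    /* Returns 0 on success, EFAULT if the user window is bad. */
    return (vm_uiomove(&p->p_vmspace->vm_map, srcobject, cp, cnt,
        uaddr, &npages));
}
#endif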
/*
 * Performs the copy-on-write operations necessary to allow the virtual
 * copies into user space to work.  This has to be called for write(2)
 * system calls from other processes, file unlinking, and file size
 * shrinkage.
 */
void
vm_freeze_copyopts(object, froma, toa)
    vm_object_t object;
    vm_pindex_t froma, toa;
{
    int rv;
    vm_object_t robject;
    vm_pindex_t idx;

    if ((object == NULL) ||
        ((object->flags & OBJ_OPT) == 0))
        return;

    if (object->shadow_count > object->ref_count)
        panic("vm_freeze_copyopts: sc > rc");

    while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
        vm_pindex_t bo_pindex;
        vm_page_t m_in, m_out;

        bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

        vm_object_reference(robject);

        vm_object_pip_wait(robject, "objfrz");

        if (robject->ref_count == 1) {
            vm_object_deallocate(robject);
            continue;
        }

        vm_object_pip_add(robject, 1);

        for (idx = 0; idx < robject->size; idx++) {
            m_out = vm_page_grab(robject, idx,
                VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

            if (m_out->valid == 0) {
                m_in = vm_page_grab(object, bo_pindex + idx,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m_in->valid == 0) {
                    rv = vm_pager_get_pages(object, &m_in, 1, 0);
                    if (rv != VM_PAGER_OK) {
                        printf("vm_freeze_copyopts: cannot read page from file: %lx\n",
                            (long)m_in->pindex);
                        continue;
                    }
                    vm_page_deactivate(m_in);
                }

                vm_page_protect(m_in, VM_PROT_NONE);
                pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
                m_out->valid = m_in->valid;
                vm_page_dirty(m_out);
                vm_page_activate(m_out);
                vm_page_wakeup(m_in);
            }
            vm_page_wakeup(m_out);
        }

        object->shadow_count--;
        object->ref_count--;
        TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
        robject->backing_object = NULL;
        robject->backing_object_offset = 0;

        vm_object_pip_wakeup(robject);
        vm_object_deallocate(robject);
    }

    vm_object_clear_flag(object, OBJ_OPT);
}
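/*
 * Sketch of the call pattern the comment above describes: any path
 * about to modify pages that may still be virtually copied into user
 * space (write(2) from another process, unlink, truncation) freezes the
 * optimized copies first.  The object and byte range are hypothetical;
 * illustrative only, not compiled:
 */
#if 0
    /* About to dirty bytes [start, end) of the file's VM object: */
    if (object->flags & OBJ_OPT)
        vm_freeze_copyopts(object, OFF_TO_IDX(start), OFF_TO_IDX(end));
#endif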
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 * vm_map_print: [ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
    static int nlines;
    /* XXX convert args. */
    vm_map_t map = (vm_map_t)addr;
    boolean_t full = have_addr;

    vm_map_entry_t entry;

    db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
        (void *)map,
        (void *)map->pmap, map->nentries, map->timestamp);
    nlines++;

    if (!full && db_indent)
        return;

    db_indent += 2;
    for (entry = map->header.next; entry != &map->header;
        entry = entry->next) {
        db_iprintf("map entry %p: start=%p, end=%p\n",
            (void *)entry, (void *)entry->start, (void *)entry->end);
        nlines++;
        {
            static char *inheritance_name[4] =
                {"share", "copy", "none", "donate_copy"};

            db_iprintf(" prot=%x/%x/%s",
                entry->protection,
                entry->max_protection,
                inheritance_name[(int)(unsigned char)entry->inheritance]);
            if (entry->wired_count != 0)
                db_printf(", wired");
        }
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
            /* XXX no %qd in kernel.  Truncate entry->offset. */
            db_printf(", share=%p, offset=0x%lx\n",
                (void *)entry->object.sub_map,
                (long)entry->offset);
            nlines++;
            if ((entry->prev == &map->header) ||
                (entry->prev->object.sub_map !=
                 entry->object.sub_map)) {
                db_indent += 2;
                vm_map_print((db_expr_t)(intptr_t)
                    entry->object.sub_map,
                    full, 0, (char *)0);
                db_indent -= 2;
            }
        } else {
            /* XXX no %qd in kernel.  Truncate entry->offset. */
            db_printf(", object=%p, offset=0x%lx",
                (void *)entry->object.vm_object,
                (long)entry->offset);
            if (entry->eflags & MAP_ENTRY_COW)
                db_printf(", copy (%s)",
                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
            db_printf("\n");
            nlines++;

            if ((entry->prev == &map->header) ||
                (entry->prev->object.vm_object !=
                 entry->object.vm_object)) {
                db_indent += 2;
                vm_object_print((db_expr_t)(intptr_t)
                    entry->object.vm_object,
                    full, 0, (char *)0);
                nlines += 4;
                db_indent -= 2;
            }
        }
    }
    db_indent -= 2;
    if (db_indent == 0)
        nlines = 0;
}

DB_SHOW_COMMAND(procvm, procvm)
{
    struct proc *p;

    if (have_addr) {
        p = (struct proc *) addr;
    } else {
        p = curproc;
    }

    db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
        (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
        (void *)vmspace_pmap(p->p_vmspace));

    vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */