Lines Matching refs:map

102  *	memory from one map to another.
111 * which may not align with existing map entries, all
119 * by copying VM object references from one map to
128 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
132 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
133 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
135 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
140 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
143 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
163 * addresses fall within the valid range of the map.
165 #define VM_MAP_RANGE_CHECK(map, start, end) \
167 if (start < vm_map_min(map)) \
168 start = vm_map_min(map); \
169 if (end > vm_map_max(map)) \
170 end = vm_map_max(map); \
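VM_MAP_RANGE_CHECK() silently clamps a caller-supplied range to the span of the map. A minimal sketch of how the exported routines in this file apply it, mirroring the vm_map_remove() lines further down in this listing; map, addr and size stand for hypothetical caller arguments:

	vm_offset_t start = addr, end = addr + size;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);	/* start/end now lie within the map */
	/* ... operate on [start, end), e.g. vm_map_delete(map, start, end) ... */
	vm_map_unlock(map);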
178 * Allocate a new slab for kernel map entries. The kernel map may be locked or
179 * unlocked, depending on whether the request is coming from the kernel map or a
181 * kernel map instead of the kmem_* layer to avoid recursion on the kernel map
198 panic("%s: kernel map is exhausted", __func__);
237 * The worst-case upper bound on the number of kernel map entries that may be
250 * User map and entry structures are allocated from the general purpose
251 * memory pool. Kernel maps are statically defined. Kernel map entries
258 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
261 * Disable the use of per-CPU buckets: map entry allocation is
262 * serialized by the kernel map lock.
268 /* Reserve an extra map entry for use when replenishing the reserve. */
290 vm_map_t map;
293 map = &vm->vm_map;
295 memset(map, 0, sizeof(*map)); /* set MAP_SYSTEM_MAP to false */
296 sx_init(&map->lock, "vm map (user)");
372 * Lock the map, to wait out all other references to it.
523 _vm_map_lock(vm_map_t map, const char *file, int line)
526 if (vm_map_is_system(map))
527 mtx_lock_flags_(&map->system_mtx, 0, file, line);
529 sx_xlock_(&map->lock, file, line);
530 map->timestamp++;
614 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
617 if (vm_map_is_system(map))
618 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
620 sx_assert_(&map->lock, SA_XLOCKED, file, line);
623 #define VM_MAP_ASSERT_LOCKED(map) \
624 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
633 &enable_vmmap_check, 0, "Enable vm map consistency checking");
635 static void _vm_map_assert_consistent(vm_map_t map, int check);
637 #define VM_MAP_ASSERT_CONSISTENT(map) \
638 _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
640 #define VM_MAP_UNLOCK_CONSISTENT(map) do { \
641 if (map->nupdates > map->nentries) { \
642 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \
643 map->nupdates = 0; \
647 #define VM_MAP_UNLOCK_CONSISTENT(map)
650 #define VM_MAP_ASSERT_LOCKED(map)
651 #define VM_MAP_ASSERT_CONSISTENT(map)
652 #define VM_MAP_UNLOCK_CONSISTENT(map)
656 _vm_map_unlock(vm_map_t map, const char *file, int line)
659 VM_MAP_UNLOCK_CONSISTENT(map);
660 if (vm_map_is_system(map)) {
662 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
664 map->flags &= ~MAP_REPLENISH;
667 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
669 sx_xunlock_(&map->lock, file, line);
675 _vm_map_lock_read(vm_map_t map, const char *file, int line)
678 if (vm_map_is_system(map))
679 mtx_lock_flags_(&map->system_mtx, 0, file, line);
681 sx_slock_(&map->lock, file, line);
685 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
688 if (vm_map_is_system(map)) {
689 KASSERT((map->flags & MAP_REPLENISH) == 0,
691 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
693 sx_sunlock_(&map->lock, file, line);
699 _vm_map_trylock(vm_map_t map, const char *file, int line)
703 error = vm_map_is_system(map) ?
704 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
705 !sx_try_xlock_(&map->lock, file, line);
707 map->timestamp++;
712 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
716 error = vm_map_is_system(map) ?
717 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
718 !sx_try_slock_(&map->lock, file, line);
725 * Tries to upgrade a read (shared) lock on the specified map to a write
727 * non-zero value if the upgrade fails. If the upgrade fails, the map is
730 * Requires that the map be read locked.
733 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
737 if (vm_map_is_system(map)) {
738 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
740 if (!sx_try_upgrade_(&map->lock, file, line)) {
741 last_timestamp = map->timestamp;
742 sx_sunlock_(&map->lock, file, line);
745 * If the map's timestamp does not change while the
746 * map is unlocked, then the upgrade succeeds.
748 sx_xlock_(&map->lock, file, line);
749 if (last_timestamp != map->timestamp) {
750 sx_xunlock_(&map->lock, file, line);
755 map->timestamp++;
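If the upgrade fails, the map is left unlocked, so any lookup done under the read lock is stale. A hedged sketch of the retry idiom implied by the comment above and used by the vm_map_lookup() lines near the end of this listing (the label name is illustrative):

RetryLookup:
	vm_map_lock_read(map);
	/* ... find the entry of interest under the read lock ... */
	if (vm_map_lock_upgrade(map) != 0) {
		/*
		 * The upgrade failed and dropped the lock: the timestamp
		 * check detected a concurrent change, so start over.
		 */
		goto RetryLookup;
	}
	/* ... the map is now exclusively locked; modify it ... */
	vm_map_unlock(map);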
760 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
763 if (vm_map_is_system(map)) {
764 KASSERT((map->flags & MAP_REPLENISH) == 0,
766 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
768 VM_MAP_UNLOCK_CONSISTENT(map);
769 sx_downgrade_(&map->lock, file, line);
777 * on the specified map and the value "0" otherwise.
780 vm_map_locked(vm_map_t map)
783 if (vm_map_is_system(map))
784 return (mtx_owned(&map->system_mtx));
785 return (sx_xlocked(&map->lock));
791 * Atomically releases the lock on the specified map and puts the calling
793 * vm_map_wakeup() is performed on the map or the specified timeout is
797 * objects and map entries. Therefore, the calling thread is expected to
798 * reacquire the map lock after reawakening and later perform an ordinary
800 * operation on the map.
803 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
806 VM_MAP_UNLOCK_CONSISTENT(map);
808 if (vm_map_is_system(map)) {
809 KASSERT((map->flags & MAP_REPLENISH) == 0,
811 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
813 sx_xunlock_(&map->lock, file, line);
815 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
822 * Awaken any threads that have slept on the map using
826 vm_map_wakeup(vm_map_t map)
831 * from being performed (and lost) between the map unlock
836 wakeup(&map->root);
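A sketch of the sleep-and-retry idiom built on these two routines, as used by the in-transition handling later in this listing; here entry is the in-transition map entry being waited on, and the timestamp comparison decides whether cached entry pointers must be looked up again:

	unsigned int last_timestamp;

	last_timestamp = map->timestamp;
	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
	(void)vm_map_unlock_and_wait(map, 0);	/* drops the lock and sleeps */
	vm_map_lock(map);
	if (last_timestamp + 1 != map->timestamp) {
		/*
		 * The map changed while it was unlocked; entry pointers
		 * cached across the sleep are stale and must be
		 * re-established with vm_map_lookup_entry().
		 */
	}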
840 vm_map_busy(vm_map_t map)
843 VM_MAP_ASSERT_LOCKED(map);
844 map->busy++;
848 vm_map_unbusy(vm_map_t map)
851 VM_MAP_ASSERT_LOCKED(map);
852 KASSERT(map->busy, ("vm_map_unbusy: not busy"));
853 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
854 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
855 wakeup(&map->busy);
860 vm_map_wait_busy(vm_map_t map)
863 VM_MAP_ASSERT_LOCKED(map);
864 while (map->busy) {
865 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
866 if (vm_map_is_system(map))
867 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
869 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
871 map->timestamp++;
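The busy count lets a thread drop the map lock (for instance to fault pages in) while still holding off operations that call vm_map_wait_busy(), such as the fork path. A sketch of the pattern, following the vm_map_wire_locked() lines later in this listing:

	vm_map_busy(map);	/* keep the map from being copied while unlocked */
	vm_map_unlock(map);
	/* ... blocking work, e.g. vm_fault(), performed without the lock ... */
	vm_map_lock(map);
	vm_map_unbusy(map);	/* wakes any thread sleeping in vm_map_wait_busy() */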
885 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
888 map->header.eflags = MAP_ENTRY_HEADER;
889 map->pmap = pmap;
890 map->header.end = min;
891 map->header.start = max;
892 map->flags = 0;
893 map->header.left = map->header.right = &map->header;
894 map->root = NULL;
895 map->timestamp = 0;
896 map->busy = 0;
897 map->anon_loc = 0;
899 map->nupdates = 0;
904 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
906 _vm_map_init(map, pmap, min, max);
907 sx_init(&map->lock, "vm map (user)");
911 vm_map_init_system(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
913 _vm_map_init(map, pmap, min, max);
914 vm_map_modflags(map, MAP_SYSTEM_MAP, 0);
915 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF |
925 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
927 uma_zfree(vm_map_is_system(map) ? kmapentzone : mapentzone, entry);
933 * Allocates a VM map entry for insertion.
937 vm_map_entry_create(vm_map_t map)
942 if (map == kernel_map) {
943 VM_MAP_ASSERT_LOCKED(map);
946 * A new slab of kernel map entries cannot be allocated at this
947 * point because the kernel map has not yet been updated to
949 * map entry, dipping into the reserve if necessary, and set a
951 * the map is unlocked.
961 if (vm_map_is_system(map)) {
1123 * lists terminated by &map->header. This function, and the subsequent call to
1125 * values in &map->header.
1128 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1133 left = right = &map->header;
1134 root = map->root;
1325 * child, its right pointer points to its successor. The map header node
1326 * is the predecessor of the first map entry, and the successor of the
1332 * The map must be locked, and leaves it so.
1337 vm_map_splay(vm_map_t map, vm_offset_t addr)
1342 header = &map->header;
1343 root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1370 map->root = root;
1371 VM_MAP_ASSERT_CONSISTENT(map);
1385 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1391 "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1392 map->nentries, entry);
1393 VM_MAP_ASSERT_LOCKED(map);
1394 map->nentries++;
1395 header = &map->header;
1396 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1400 * map, so it becomes the new root of the map tree.
1409 * the modified map.
1426 * the modified map.
1440 map->root = entry;
1441 VM_MAP_ASSERT_CONSISTENT(map);
1450 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1456 VM_MAP_ASSERT_LOCKED(map);
1457 header = &map->header;
1458 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1485 map->root = root;
1486 VM_MAP_ASSERT_CONSISTENT(map);
1487 map->nentries--;
1488 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1489 map->nentries, entry);
1498 * The map must be locked, and leaves it so.
1501 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1505 VM_MAP_ASSERT_LOCKED(map);
1506 header = &map->header;
1507 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1514 map->root = root;
1515 VM_MAP_ASSERT_CONSISTENT(map);
1516 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1517 __func__, map, map->nentries, entry);
1523 * Finds the map entry containing (or
1525 * in the given map; the entry is returned
1528 * actually contained in the map.
1532 vm_map_t map,
1540 * If the map is empty, then the map entry immediately preceding
1541 * "address" is the map's header.
1543 header = &map->header;
1544 cur = map->root;
1553 if ((locked = vm_map_locked(map)) ||
1554 sx_try_upgrade(&map->lock)) {
1556 * Splay requires a write lock on the map. However, it only
1558 * change the map. Thus, the map's timestamp need not change
1561 cur = vm_map_splay(map, address);
1563 VM_MAP_UNLOCK_CONSISTENT(map);
1564 sx_downgrade(&map->lock);
1568 * If "address" is contained within a map entry, the new root
1569 * is that map entry. Otherwise, the new root is a map entry
1580 * Since the map is only locked for read access, perform a
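A sketch of how callers interpret vm_map_lookup_entry(): the boolean result says whether the address fell inside an entry, and on a miss the returned entry is the predecessor (possibly &map->header), matching the description above. The map must be locked; a read lock suffices.

	vm_map_entry_t entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr lies within [entry->start, entry->end). */
	} else {
		/* No entry contains addr; "entry" precedes the hole. */
	}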
1606 * returns the newly inserted map entry in '*res'. In case the new
1612 vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1623 VM_MAP_ASSERT_LOCKED(map);
1637 if (start == end || !vm_map_range_valid(map, start, end))
1640 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
1648 if (vm_map_lookup_entry(map, start, &prev_entry))
1713 if (map == kernel_map && end > kernel_vm_end)
1742 * can extend the previous map entry to include the
1753 map->size += end - prev_entry->end;
1754 vm_map_entry_resize(map, prev_entry,
1756 *res = vm_map_try_merge_entries(map, prev_entry,
1763 * map entry, we have to create a new map entry. We
1783 new_entry = vm_map_entry_create(map);
1807 vm_map_entry_link(map, new_entry);
1809 map->size += new_entry->end - new_entry->start;
1817 vm_map_try_merge_entries(map, prev_entry, new_entry);
1818 *res = vm_map_try_merge_entries(map, new_entry, next_entry);
1821 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1831 * Inserts the given VM object into the target map at the
1834 * Requires that the map be locked, and leaves it so.
1840 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1845 return (vm_map_insert1(map, object, offset, start, end, prot, max,
1853 * beginning at address >= start in the given map.
1861 * The map must be locked, and leaves it so.
1864 * vm_map_max(map)-length+1 if insufficient space.
1867 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1873 VM_MAP_ASSERT_LOCKED(map);
1879 start = MAX(start, vm_map_min(map));
1880 if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1881 return (vm_map_max(map) - length + 1);
1884 if (map->root == NULL)
1894 header = &map->header;
1895 root = vm_map_splay_split(map, start, length, &llist, &rlist);
1915 map->root = root;
1916 VM_MAP_ASSERT_CONSISTENT(map);
1922 return (vm_map_max(map) - length + 1);
1953 map->root = root;
1954 VM_MAP_ASSERT_CONSISTENT(map);
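Because vm_map_findspace() signals failure by returning vm_map_max(map) - length + 1 rather than an error code, callers test the result against the top of the map, as the vm_map_find_aligned() lines below do. A minimal sketch:

	vm_offset_t addr;

	addr = vm_map_findspace(map, start, length);
	if (addr + length > vm_map_max(map))
		return (KERN_NO_SPACE);	/* no gap of "length" bytes at or above "start" */
	/* "addr" begins a free gap of at least "length" bytes. */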
1959 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1969 vm_map_lock(map);
1970 VM_MAP_RANGE_CHECK(map, start, end);
1972 result = vm_map_delete(map, start, end);
1977 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1980 result = vm_map_insert(map, object, offset, start, end,
1984 vm_map_unlock(map);
2024 * Searches for the specified amount of free space in the given map with the
2032 * The map must be locked. Initially, there must be at least "length" bytes
2036 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2042 VM_MAP_ASSERT_LOCKED(map);
2044 KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
2072 *addr = vm_map_findspace(map, aligned_addr, length);
2073 if (*addr + length > vm_map_max(map) ||
2089 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
2093 *addr = vm_map_findspace(map, *addr, length);
2094 if (*addr + length > vm_map_max(map) ||
2097 return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr,
2103 * map with the given length. The search is defined to be
2111 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2118 vm_map_lock(map);
2119 rv = vm_map_find_locked(map, object, offset, addr, length, max_addr,
2121 vm_map_unlock(map);
2126 vm_map_find_locked(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2147 en_aslr = (map->flags & MAP_ASLR) != 0;
2149 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2156 (map->flags & MAP_ASLR_IGNSTART) != 0)
2157 curr_min_addr = min_addr = vm_map_min(map);
2160 curr_min_addr = map->anon_loc;
2195 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2196 vm_map_min(map) : min_addr;
2222 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2225 *addr = vm_map_findspace(map, curr_min_addr,
2228 vm_map_max(map))
2235 *addr = vm_map_findspace(map, curr_min_addr, length);
2236 if (*addr + length > vm_map_max(map) ||
2248 (rv = vm_map_alignspace(map, object, offset, addr, length,
2260 if (!vm_map_range_valid(map, *addr, *addr + length))
2262 rv = vm_map_delete(map, *addr, *addr + length);
2267 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2270 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2279 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 ||
2280 *addr < map->anon_loc))
2281 map->anon_loc = *addr;
2298 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2312 rv = vm_map_find(map, object, offset, addr, length, max_addr,
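A hedged sketch of an ordinary caller of vm_map_find(); the trailing arguments (find_space policy, protections, copy-on-write flags) are assumed from the vm_map.h prototype rather than spelled out in the fragments above, and size is a hypothetical length:

	vm_offset_t addr;
	int rv;

	addr = vm_map_min(map);		/* hint: search from the bottom of the map */
	rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* [addr, addr + size) is now an anonymous mapping in "map". */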
2321 * A map entry with any of the following flags set must not be merged with
2349 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2355 * the vnode has additional references. Thus, the map lock can be
2366 vm_map_entry_dispose(map, entry);
2372 * Compare two map entries that represent consecutive ranges. If
2375 * the map entry that includes the first range.
2377 * The map must be locked.
2380 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2384 VM_MAP_ASSERT_LOCKED(map);
2387 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2388 vm_map_merged_neighbor_dispose(map, prev_entry);
2397 * Allocate an object to back a map entry.
2405 ("map entry %p has backing object", entry));
2407 ("map entry %p is a submap", entry));
2422 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2425 VM_MAP_ASSERT_LOCKED(map);
2427 ("map entry %p is a submap", entry));
2428 if (entry->object.vm_object == NULL && !vm_map_is_system(map) &&
2447 * Create a duplicate map entry for clipping.
2450 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2454 VM_MAP_ASSERT_LOCKED(map);
2458 * objects won't be created after the map entry is split.
2460 vm_map_entry_charge_object(map, entry);
2463 new_entry = vm_map_entry_create(map);
2488 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2493 if (!vm_map_is_system(map))
2495 "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2501 VM_MAP_ASSERT_LOCKED(map);
2511 new_entry = vm_map_entry_clone(map, entry);
2518 vm_map_entry_link(map, new_entry);
2530 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2536 if (!vm_map_is_system(map))
2538 "%s: map %p start 0x%jx prev %p", __func__, map,
2541 if (vm_map_lookup_entry(map, start, prev_entry)) {
2543 rv = vm_map_clip_start(map, entry, start);
2561 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2566 if (!vm_map_is_system(map))
2568 "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2574 VM_MAP_ASSERT_LOCKED(map);
2584 new_entry = vm_map_entry_clone(map, entry);
2591 vm_map_entry_link(map, new_entry);
2599 * Mark the given range as handled by a subordinate map.
2611 * range from the superior map, and then destroy the
2616 vm_map_t map,
2630 vm_map_lock(map);
2631 VM_MAP_RANGE_CHECK(map, start, end);
2632 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2635 result = vm_map_clip_start(map, entry, start);
2638 result = vm_map_clip_end(map, entry, end);
2646 vm_map_unlock(map);
2657 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2664 * Preload the specified map's pmap with mappings to the specified
2674 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2687 pmap_object_init_pt(map->pmap, addr, object, pindex,
2749 pmap_enter_object(map->pmap, start, addr +
2755 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2785 * specified address region in the target map.
2788 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2810 vm_map_lock(map);
2812 if ((map->flags & MAP_WXORX) != 0 &&
2815 vm_map_unlock(map);
2821 * need to fault pages into the map and will drop the map lock while
2823 * update the protection on the map entry in between faults.
2825 vm_map_wait_busy(map);
2827 VM_MAP_RANGE_CHECK(map, start, end);
2829 if (!vm_map_lookup_entry(map, start, &first_entry))
2843 first_entry != vm_map_entry_first(map))
2859 vm_map_unlock(map);
2868 vm_map_unlock(map);
2876 * Postpone the operation until all in-transition map entries have
2884 vm_map_unlock_and_wait(map, 0);
2895 rv = vm_map_clip_start(map, first_entry, start);
2897 vm_map_unlock(map);
2902 rv = vm_map_clip_end(map, entry, end);
2904 vm_map_unlock(map);
2963 vm_map_try_merge_entries(map, prev_entry, entry),
2984 * For user wired map entries, the normal lazy evaluation of
2987 * copy-on-write and enable write access in the physical map.
2992 vm_fault_copy_entry(map, map, entry, entry, NULL);
2995 * When restricting access, update the physical map. Worry
3001 pmap_protect(map->pmap, entry->start,
3007 vm_map_try_merge_entries(map, prev_entry, entry);
3008 vm_map_unlock(map);
3015 * This routine traverses a process's map handling the madvise
3022 vm_map_t map,
3033 * we need to use an exclusive lock on the map and we need to perform
3035 * on the map.
3048 vm_map_lock(map);
3056 vm_map_lock_read(map);
3065 VM_MAP_RANGE_CHECK(map, start, end);
3074 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
3076 vm_map_unlock(map);
3085 rv = vm_map_clip_end(map, entry, end);
3087 vm_map_unlock(map);
3119 vm_map_try_merge_entries(map, prev_entry, entry);
3121 vm_map_try_merge_entries(map, prev_entry, entry);
3122 vm_map_unlock(map);
3133 if (!vm_map_lookup_entry(map, start, &entry))
3146 * we hold the VM map read-locked, neither the
3183 pmap_advise(map->pmap, useStart, useEnd,
3196 vm_map_pmap_enter(map,
3206 vm_map_unlock_read(map);
3215 * range in the target map. Inheritance
3216 * affects how the map will be shared with
3220 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
3237 vm_map_lock(map);
3238 VM_MAP_RANGE_CHECK(map, start, end);
3239 rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry);
3242 if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3243 rv = vm_map_clip_end(map, lentry, end);
3264 vm_map_try_merge_entries(map, prev_entry, entry);
3266 vm_map_try_merge_entries(map, prev_entry, entry);
3268 vm_map_unlock(map);
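A short sketch of a minherit()-style caller; the fourth argument is one of the VM_INHERIT_* constants and controls how the fork code later in this listing treats the range:

	rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS)
		return (rv);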
3275 * Release the map lock, and sleep until the entry is no longer in
3276 * transition. Awaken and reacquire the map lock. If the map changed while
3281 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3288 VM_MAP_ASSERT_LOCKED(map);
3290 ("not in-transition map entry %p", in_entry));
3296 last_timestamp = map->timestamp;
3297 if (vm_map_unlock_and_wait(map, 0)) {
3302 vm_map_lock(map);
3303 if (last_timestamp + 1 == map->timestamp)
3307 * Look again for the entry because the map was modified while it was
3311 if (!vm_map_lookup_entry(map, start, &entry)) {
3327 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3338 vm_map_lock(map);
3339 VM_MAP_RANGE_CHECK(map, start, end);
3340 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3344 vm_map_unlock(map);
3354 next_entry = vm_map_entry_in_transition(map, start,
3358 vm_map_unlock(map);
3368 rv = vm_map_clip_start(map, entry, start);
3371 rv = vm_map_clip_end(map, entry, end);
3376 * Mark the entry in case the map lock is released. (See
3381 ("owned map entry %p", entry));
3386 * Check the map for holes in the specified region.
3407 !vm_map_lookup_entry(map, start, &first_entry)) {
3420 * while the map lock was dropped for draining
3436 vm_map_entry_unwire(map, entry);
3452 vm_map_try_merge_entries(map, prev_entry, entry);
3454 vm_map_try_merge_entries(map, prev_entry, entry);
3455 vm_map_unlock(map);
3457 vm_map_wakeup(map);
3488 * The map should be locked.
3491 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3495 VM_MAP_ASSERT_LOCKED(map);
3507 pmap_unwire(map->pmap, entry->start, failed_addr);
3520 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3524 vm_map_lock(map);
3525 rv = vm_map_wire_locked(map, start, end, flags);
3526 vm_map_unlock(map);
3533 * Implements both kernel and user wiring. Returns with the map locked,
3534 * the map lock may be dropped.
3537 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3547 VM_MAP_ASSERT_LOCKED(map);
3556 VM_MAP_RANGE_CHECK(map, start, end);
3557 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3568 next_entry = vm_map_entry_in_transition(map, start,
3580 rv = vm_map_clip_start(map, entry, start);
3583 rv = vm_map_clip_end(map, entry, end);
3588 * Mark the entry in case the map lock is released. (See
3593 ("owned map entry %p", entry));
3609 vm_map_wire_entry_failure(map, entry,
3617 * Release the map lock, relying on the in-transition
3618 * mark. Mark the map busy for fork.
3622 last_timestamp = map->timestamp;
3625 vm_map_busy(map);
3626 vm_map_unlock(map);
3632 * it into the physical map.
3634 rv = vm_fault(map, faddr, VM_PROT_NONE,
3639 vm_map_lock(map);
3640 vm_map_unbusy(map);
3641 if (last_timestamp + 1 != map->timestamp) {
3643 * Look again for the entry because the map was
3648 if (!vm_map_lookup_entry(map, saved_start,
3664 vm_map_wire_entry_failure(map,
3669 vm_map_wire_entry_failure(map, entry, faddr);
3680 * Check the map for holes in the specified region.
3695 !vm_map_lookup_entry(map, start, &first_entry)) {
3708 * while the map lock was dropped for faulting in the
3743 vm_map_entry_unwire(map, entry);
3761 vm_map_try_merge_entries(map, prev_entry, entry);
3763 vm_map_try_merge_entries(map, prev_entry, entry);
3765 vm_map_wakeup(map);
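A sketch of the caller-level pairing of these routines, along the lines of what the mlock()/munlock() paths do; the particular VM_MAP_WIRE_* flag combination shown is illustrative:

	rv = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	if (rv == KERN_SUCCESS) {
		/* ... the range is resident and wired ... */
		rv = vm_map_unwire(map, start, end,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	}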
3787 vm_map_t map,
3801 vm_map_lock_read(map);
3802 VM_MAP_RANGE_CHECK(map, start, end);
3803 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3804 vm_map_unlock_read(map);
3818 vm_map_unlock_read(map);
3825 vm_map_unlock_read(map);
3832 vm_map_unlock_read(map);
3838 pmap_remove(map->pmap, start, end);
3866 last_timestamp = map->timestamp;
3867 vm_map_unlock_read(map);
3872 vm_map_lock_read(map);
3873 if (last_timestamp == map->timestamp ||
3874 !vm_map_lookup_entry(map, start, &entry))
3878 vm_map_unlock_read(map);
3887 * The map in question should be locked.
3891 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3895 VM_MAP_ASSERT_LOCKED(map);
3902 pmap_unwire(map->pmap, entry->start, entry->end);
3920 * Deallocate the given entry from the target map.
3923 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3929 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3936 vm_map_entry_deallocate(entry, vm_map_is_system(map));
3941 map->size -= size;
3987 if (vm_map_is_system(map))
3999 * map.
4002 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
4007 VM_MAP_ASSERT_LOCKED(map);
4016 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
4026 (vm_map_pmap(map) != kernel_pmap &&
4033 last_timestamp = map->timestamp;
4034 (void) vm_map_unlock_and_wait(map, 0);
4035 vm_map_lock(map);
4036 if (last_timestamp + 1 != map->timestamp) {
4038 * Look again for the entry because the map was
4043 rv = vm_map_lookup_clip_start(map, saved_start,
4053 rv = vm_map_clip_end(map, entry, end);
4063 vm_map_entry_unwire(map, entry);
4072 pmap_map_delete(map->pmap, entry->start, entry->end);
4080 vm_map_entry_delete(map, entry);
4088 * Remove the given address range from the target map.
4092 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
4096 vm_map_lock(map);
4097 VM_MAP_RANGE_CHECK(map, start, end);
4098 result = vm_map_delete(map, start, end);
4099 vm_map_unlock(map);
4106 * Assert that the target map allows the specified privilege on the
4115 * The map must be locked. A read lock is sufficient.
4118 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
4124 if (!vm_map_lookup_entry(map, start, &tmp_entry))
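A sketch of a read-locked caller of vm_map_check_protection(); it only verifies that every entry covering the range currently grants the requested access:

	vm_map_lock_read(map);
	if (!vm_map_check_protection(map, start, end, VM_PROT_READ)) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}
	/* ... the range is readable; continue under the read lock ... */
	vm_map_unlock_read(map);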
4150 * Copies a swap-backed object from an existing map entry to a
4293 * Immediately copy these pages into the new map by simulating
4303 * Update the newly-forked vmspace each time a map entry is inherited
4336 * based on those of an existing process. The new map
4337 * is based on the old map, according to the inheritance
4338 * values on the regions in that map.
4342 * The source map must not be locked.
4480 * Insert the entry into the new map -- we know we're
4481 * inserting at the end of the new map.
4487 * Update the physical map
4497 * Clone the entry and link into the map.
4547 * map entries, which cannot be done until both old_map and
4562 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4569 MPASS((map->flags & MAP_WIREFUTURE) == 0);
4572 vm_map_lock(map);
4575 if (map->size + init_ssize > vmemlim) {
4579 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4582 vm_map_unlock(map);
4592 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4604 !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4617 if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4627 * We initially map a stack of only init_ssize, at the top of
4638 rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow,
4648 rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4670 (void)vm_map_delete(map, bot, top);
4680 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4706 if (p != initproc && (map != &p->p_vmspace->vm_map ||
4710 MPASS(!vm_map_is_system(map));
4717 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4777 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4778 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4786 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4797 if (map->size + grow_amount > vmemlim) {
4804 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4813 if (vm_map_lock_upgrade(map)) {
4815 vm_map_lock_read(map);
4831 vm_map_entry_delete(map, gap_entry);
4835 vm_map_entry_resize(map, gap_entry, -grow_amount);
4838 rv = vm_map_insert(map, NULL, 0, grow_start,
4842 rv1 = vm_map_insert1(map, NULL, 0, gap_start,
4850 vm_map_entry_resize(map, gap_entry,
4860 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4861 rv = vm_map_wire_locked(map, grow_start,
4865 vm_map_lock_downgrade(map);
4871 error = racct_set(p, RACCT_VMEM, map->size);
4875 ptoa(pmap_wired_count(map->pmap)));
4958 * specified map, assuming a page fault of the
4961 * Leaves the map in question locked for read; return
4963 * call is performed. Note that the map argument
4964 * is in/out; the returned map must be used in
4971 * specified, the map may be changed to perform virtual
4986 vm_map_t map = *var_map;
4995 vm_map_lock_read(map);
5001 if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
5002 vm_map_unlock_read(map);
5012 vm_map_t old_map = map;
5014 *var_map = map = entry->object.sub_map;
5025 if (prot == VM_PROT_NONE && map != kernel_map &&
5028 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
5033 vm_map_unlock_read(map);
5043 vm_map_unlock_read(map);
5062 * now since we've got the map locked.
5072 * -- one just moved from the map to the new
5075 if (vm_map_lock_upgrade(map))
5087 vm_map_unlock(map);
5105 vm_map_lock_downgrade(map);
5118 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) {
5119 if (vm_map_lock_upgrade(map))
5125 vm_map_lock_downgrade(map);
5143 * KERN_FAILURE instead of blocking on map lock or memory allocation.
5156 vm_map_t map = *var_map;
5163 if (!vm_map_lookup_entry(map, vaddr, out_entry))
5206 if (entry->object.vm_object == NULL && !vm_map_is_system(map))
5227 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5230 * Unlock the main-level map
5232 vm_map_unlock_read(map);
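A hedged sketch of the vm_map_lookup()/vm_map_lookup_done() pairing used by the fault code; the parameter list is assumed from the vm_map.h prototype, and vaddr is a hypothetical faulting address:

	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... use "object"/"pindex"; the map is returned read locked ... */
	vm_map_lookup_done(map, entry);	/* drops the read lock */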
5236 vm_map_max_KBI(const struct vm_map *map)
5239 return (vm_map_max(map));
5243 vm_map_min_KBI(const struct vm_map *map)
5246 return (vm_map_min(map));
5250 vm_map_pmap_KBI(vm_map_t map)
5253 return (map->pmap);
5257 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
5260 return (vm_map_range_valid(map, start, end));
5265 _vm_map_assert_consistent(vm_map_t map, int check)
5272 ++map->nupdates;
5277 header = prev = &map->header;
5278 VM_MAP_ENTRY_FOREACH(entry, map) {
5280 ("map %p prev->end = %jx, start = %jx", map,
5283 ("map %p start = %jx, end = %jx", map,
5287 ("map %p left->start = %jx, start = %jx", map,
5291 ("map %p start = %jx, right->start = %jx", map,
5293 cur = map->root;
5300 ("map %p cannot find %jx",
5301 map, (uintmax_t)entry->start));
5306 ("map %p cannot find %jx",
5307 map, (uintmax_t)entry->start));
5310 ("map %p cannot find %jx",
5311 map, (uintmax_t)entry->start));
5318 ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5324 ("map %p prev->end = %jx, start = %jx", map,
5336 vm_map_print(vm_map_t map)
5340 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5341 (void *)map,
5342 (void *)map->pmap, map->nentries, map->timestamp);
5345 prev = &map->header;
5346 VM_MAP_ENTRY_FOREACH(entry, map) {
5347 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
5366 if (prev == &map->header ||
5388 if (prev == &map->header ||
5403 DB_SHOW_COMMAND(map, map)
5407 db_printf("usage: show map <addr>\n");
5423 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",