Lines Matching +full:vm +full:- +full:map
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vnode_pager.h>
95 #include <vm/swap_pager.h>
96 #include <vm/uma.h>
102 * memory from one map to another.
106 * Maps consist of an ordered doubly-linked list of simple
107 * entries; a self-adjusting binary search tree of these
111 * which may not align with existing map entries, all
119 * by copying VM object references from one map to
120 * another, and then marking both regions as copy-on-write.
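As a rough orientation for these excerpted comments, the organization they describe (entries kept on an ordered list threaded through a self-adjusting binary search tree, with copy-on-write done by sharing VM object references) can be pictured with the simplified, hypothetical layout below. This is a sketch only; the real struct vm_map_entry in <vm/vm_map.h> carries more fields and a different link encoding.

    /* Illustrative sketch, not the real <vm/vm_map.h> definitions. */
    struct sketch_map_entry {
            struct sketch_map_entry *left, *right; /* splay-tree links; leaf
                                                      links double as the
                                                      ordered-list threading */
            vm_offset_t start, end;         /* mapped range [start, end) */
            vm_size_t max_free;             /* largest free gap in subtree */
            union {
                    struct vm_object *vm_object; /* backing object; a fork
                                                    shares the reference and
                                                    marks both entries
                                                    MAP_ENTRY_NEEDS_COPY */
                    struct vm_map *sub_map;      /* or a subordinate map */
            } object;
            vm_ooffset_t offset;            /* offset into the object */
            vm_prot_t protection, max_protection;
    };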
128 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
132 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
133 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
135 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
140 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
143 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
148 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
149 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
150 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
163 * addresses fall within the valid range of the map.
165 #define VM_MAP_RANGE_CHECK(map, start, end) \
167 if (start < vm_map_min(map)) \
168 start = vm_map_min(map); \
169 if (end > vm_map_max(map)) \
170 end = vm_map_max(map); \
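A minimal sketch of how this clamping macro is used inside vm_map.c; the same pattern appears verbatim in the vm_map_fixed() and vm_map_remove() fragments later in this listing (do_operation() here is a hypothetical placeholder):

    vm_map_lock(map);
    VM_MAP_RANGE_CHECK(map, start, end);    /* clamp to the map's valid range */
    result = do_operation(map, start, end); /* e.g. vm_map_delete(map, start, end) */
    vm_map_unlock(map);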
178 * Allocate a new slab for kernel map entries. The kernel map may be locked or
179 * unlocked, depending on whether the request is coming from the kernel map or a
181 * kernel map instead of the kmem_* layer to avoid recursion on the kernel map
198 panic("%s: kernel map is exhausted", __func__);
237 * The worst-case upper bound on the number of kernel map entries that may be
250 * User map and entry structures are allocated from the general purpose
251 * memory pool. Kernel maps are statically defined. Kernel map entries
258 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
261 * Disable the use of per-CPU buckets: map entry allocation is
262 * serialized by the kernel map lock.
268 /* Reserve an extra map entry for use when replenishing the reserve. */
275 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
289 struct vmspace *vm;
290 vm_map_t map;
292 vm = (struct vmspace *)mem;
293 map = &vm->vm_map;
295 memset(map, 0, sizeof(*map)); /* set MAP_SYSTEM_MAP to false */
296 sx_init(&map->lock, "vm map (user)");
297 PMAP_LOCK_INIT(vmspace_pmap(vm));
305 struct vmspace *vm;
307 vm = (struct vmspace *)mem;
308 KASSERT(vm->vm_map.nentries == 0,
309 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
310 KASSERT(vm->vm_map.size == 0,
311 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
322 struct vmspace *vm;
324 vm = uma_zalloc(vmspace_zone, M_WAITOK);
325 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
326 if (!pinit(vmspace_pmap(vm))) {
327 uma_zfree(vmspace_zone, vm);
330 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
331 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
332 refcount_init(&vm->vm_refcnt, 1);
333 vm->vm_shm = NULL;
334 vm->vm_swrss = 0;
335 vm->vm_tsize = 0;
336 vm->vm_dsize = 0;
337 vm->vm_ssize = 0;
338 vm->vm_taddr = 0;
339 vm->vm_daddr = 0;
340 vm->vm_maxsaddr = 0;
341 return (vm);
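A hedged usage sketch: a fresh vmspace is normally created at exec time with the ABI's user address limits and a pmap initialization callback (callers generally pass pmap_pinit); the refcount_init() above means the returned vmspace already holds one reference. The sv_minuser/sv_maxuser names below are placeholders for those limits.

    struct vmspace *vm;

    vm = vmspace_alloc(sv_minuser, sv_maxuser, pmap_pinit);
    if (vm == NULL)
            return (ENOMEM);        /* pmap initialization failed */
    /* ... install vm as the process's p_vmspace, vmspace_free() the old one ... */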
360 vmspace_dofree(struct vmspace *vm)
363 CTR1(KTR_VM, "vmspace_free: %p", vm);
369 shmexit(vm);
372 * Lock the map, to wait out all other references to it.
376 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
377 vm_map_max(&vm->vm_map));
379 pmap_release(vmspace_pmap(vm));
380 vm->vm_map.pmap = NULL;
381 uma_zfree(vmspace_zone, vm);
385 vmspace_free(struct vmspace *vm)
391 if (refcount_release(&vm->vm_refcnt))
392 vmspace_dofree(vm);
398 struct vmspace *vm;
401 vm = p->p_vmspace;
402 p->p_vmspace = NULL;
404 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
405 vmspace_free(vm);
411 struct vmspace *vm;
415 p = td->td_proc;
416 vm = p->p_vmspace;
426 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
427 if (p->p_vmspace != &vmspace0) {
429 p->p_vmspace = &vmspace0;
433 released = refcount_release(&vm->vm_refcnt);
440 if (p->p_vmspace != vm) {
442 p->p_vmspace = vm;
446 pmap_remove_pages(vmspace_pmap(vm));
448 p->p_vmspace = &vmspace0;
451 vmspace_dofree(vm);
464 struct vmspace *vm;
467 vm = p->p_vmspace;
468 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
472 if (vm != p->p_vmspace) {
474 vmspace_free(vm);
478 return (vm);
491 * a result, the 'newvm' vmspace always has a non-zero reference
503 KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
506 oldvm = curproc->p_vmspace;
513 curproc->p_vmspace = newvm;
514 refcount_acquire(&newvm->vm_refcnt);
523 _vm_map_lock(vm_map_t map, const char *file, int line)
526 if (vm_map_is_system(map))
527 mtx_lock_flags_(&map->system_mtx, 0, file, line);
529 sx_xlock_(&map->lock, file, line);
530 map->timestamp++;
540 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
542 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
544 object = entry->object.vm_object;
546 if ((object->flags & OBJ_ANON) != 0)
547 object = object->handle;
549 KASSERT(object->backing_object == NULL,
550 ("non-anon object %p shadows", object));
552 entry, entry->object.vm_object));
587 entry = td->td_map_def_user;
588 td->td_map_def_user = NULL;
590 next = entry->defer_next;
591 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
594 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
599 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
601 object = entry->object.vm_object;
603 vm_pager_release_writecount(object, entry->start,
604 entry->end);
614 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
617 if (vm_map_is_system(map))
618 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
620 sx_assert_(&map->lock, SA_XLOCKED, file, line);
623 #define VM_MAP_ASSERT_LOCKED(map) \
624 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
633 &enable_vmmap_check, 0, "Enable vm map consistency checking");
635 static void _vm_map_assert_consistent(vm_map_t map, int check);
637 #define VM_MAP_ASSERT_CONSISTENT(map) \
638 _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
640 #define VM_MAP_UNLOCK_CONSISTENT(map) do { \
641 if (map->nupdates > map->nentries) { \
642 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \
643 map->nupdates = 0; \
647 #define VM_MAP_UNLOCK_CONSISTENT(map)
650 #define VM_MAP_ASSERT_LOCKED(map)
651 #define VM_MAP_ASSERT_CONSISTENT(map)
652 #define VM_MAP_UNLOCK_CONSISTENT(map)
656 _vm_map_unlock(vm_map_t map, const char *file, int line)
659 VM_MAP_UNLOCK_CONSISTENT(map);
660 if (vm_map_is_system(map)) {
662 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
664 map->flags &= ~MAP_REPLENISH;
667 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
669 sx_xunlock_(&map->lock, file, line);
675 _vm_map_lock_read(vm_map_t map, const char *file, int line)
678 if (vm_map_is_system(map))
679 mtx_lock_flags_(&map->system_mtx, 0, file, line);
681 sx_slock_(&map->lock, file, line);
685 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
688 if (vm_map_is_system(map)) {
689 KASSERT((map->flags & MAP_REPLENISH) == 0,
691 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
693 sx_sunlock_(&map->lock, file, line);
699 _vm_map_trylock(vm_map_t map, const char *file, int line)
703 error = vm_map_is_system(map) ?
704 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
705 !sx_try_xlock_(&map->lock, file, line);
707 map->timestamp++;
712 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
716 error = vm_map_is_system(map) ?
717 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
718 !sx_try_slock_(&map->lock, file, line);
725 * Tries to upgrade a read (shared) lock on the specified map to a write
727 * non-zero value if the upgrade fails. If the upgrade fails, the map is
730 * Requires that the map be read locked.
733 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
737 if (vm_map_is_system(map)) {
738 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
740 if (!sx_try_upgrade_(&map->lock, file, line)) {
741 last_timestamp = map->timestamp;
742 sx_sunlock_(&map->lock, file, line);
745 * If the map's timestamp does not change while the
746 * map is unlocked, then the upgrade succeeds.
748 sx_xlock_(&map->lock, file, line);
749 if (last_timestamp != map->timestamp) {
750 sx_xunlock_(&map->lock, file, line);
755 map->timestamp++;
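A minimal sketch of the retry pattern that the timestamp check above enables for callers of vm_map_lock_upgrade(): on failure the map has been unlocked, so the caller relocks it and revalidates whatever entry it was holding.

    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, addr, &entry)) {
            vm_map_unlock_read(map);
            return (KERN_INVALID_ADDRESS);
    }
    if (vm_map_lock_upgrade(map) != 0) {
            /* Upgrade failed: the map was unlocked and may have changed. */
            vm_map_lock(map);
            if (!vm_map_lookup_entry(map, addr, &entry)) {
                    vm_map_unlock(map);
                    return (KERN_INVALID_ADDRESS);
            }
    }
    /* ... modify the map with the exclusive lock held ... */
    vm_map_unlock(map);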
760 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
763 if (vm_map_is_system(map)) {
764 KASSERT((map->flags & MAP_REPLENISH) == 0,
766 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
768 VM_MAP_UNLOCK_CONSISTENT(map);
769 sx_downgrade_(&map->lock, file, line);
776 * Returns a non-zero value if the caller holds a write (exclusive) lock
777 * on the specified map and the value "0" otherwise.
780 vm_map_locked(vm_map_t map)
783 if (vm_map_is_system(map))
784 return (mtx_owned(&map->system_mtx));
785 return (sx_xlocked(&map->lock));
791 * Atomically releases the lock on the specified map and puts the calling
793 * vm_map_wakeup() is performed on the map or the specified timeout is
797 * objects and map entries. Therefore, the calling thread is expected to
798 * reacquire the map lock after reawakening and later perform an ordinary
800 * operation on the map.
803 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
806 VM_MAP_UNLOCK_CONSISTENT(map);
808 if (vm_map_is_system(map)) {
809 KASSERT((map->flags & MAP_REPLENISH) == 0,
811 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
813 sx_xunlock_(&map->lock, file, line);
815 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
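A hedged sketch of the sleep/wakeup protocol this helper supports, mirroring the in-transition handling that appears later in this listing: the waiter marks the entry, records the timestamp, sleeps, and looks the entry up again after relocking, while the thread that clears the condition calls vm_map_wakeup().

    /* Waiter side. */
    entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
    last_timestamp = map->timestamp;
    (void)vm_map_unlock_and_wait(map, 0);
    vm_map_lock(map);
    if (last_timestamp + 1 != map->timestamp)
            (void)vm_map_lookup_entry(map, start, &entry);  /* map changed */

    /* Waker side: clear the condition, then wake sleepers after unlocking. */
    if ((entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) != 0) {
            entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
            need_wakeup = true;
    }
    vm_map_unlock(map);
    if (need_wakeup)
            vm_map_wakeup(map);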
822 * Awaken any threads that have slept on the map using
826 vm_map_wakeup(vm_map_t map)
831 * from being performed (and lost) between the map unlock
836 wakeup(&map->root);
840 vm_map_busy(vm_map_t map)
843 VM_MAP_ASSERT_LOCKED(map);
844 map->busy++;
848 vm_map_unbusy(vm_map_t map)
851 VM_MAP_ASSERT_LOCKED(map);
852 KASSERT(map->busy, ("vm_map_unbusy: not busy"));
853 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
854 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
855 wakeup(&map->busy);
860 vm_map_wait_busy(vm_map_t map)
863 VM_MAP_ASSERT_LOCKED(map);
864 while (map->busy) {
865 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
866 if (vm_map_is_system(map))
867 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
869 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
871 map->timestamp++;
885 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
888 map->header.eflags = MAP_ENTRY_HEADER;
889 map->pmap = pmap;
890 map->header.end = min;
891 map->header.start = max;
892 map->flags = 0;
893 map->header.left = map->header.right = &map->header;
894 map->root = NULL;
895 map->timestamp = 0;
896 map->busy = 0;
897 map->anon_loc = 0;
899 map->nupdates = 0;
904 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
906 _vm_map_init(map, pmap, min, max);
907 sx_init(&map->lock, "vm map (user)");
911 vm_map_init_system(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
913 _vm_map_init(map, pmap, min, max);
914 vm_map_modflags(map, MAP_SYSTEM_MAP, 0);
915 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF |
925 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
927 uma_zfree(vm_map_is_system(map) ? kmapentzone : mapentzone, entry);
933 * Allocates a VM map entry for insertion.
937 vm_map_entry_create(vm_map_t map)
942 if (map == kernel_map) {
943 VM_MAP_ASSERT_LOCKED(map);
946 * A new slab of kernel map entries cannot be allocated at this
947 * point because the kernel map has not yet been updated to
949 * map entry, dipping into the reserve if necessary, and set a
951 * the map is unlocked.
957 kernel_map->flags |= MAP_REPLENISH;
961 if (vm_map_is_system(map)) {
980 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
995 return (root->left != left_ancestor ?
996 root->left->max_free : root->start - left_ancestor->end);
1003 return (root->right != right_ancestor ?
1004 root->right->max_free : right_ancestor->start - root->end);
1019 prior = entry->left;
1020 if (prior->right->start < entry->start) {
1022 prior = prior->right;
1023 while (prior->right != entry);
1040 * Infer root->right->max_free == root->max_free when \
1041 * y->max_free < root->max_free || root->max_free == 0. \
1044 y = root->left; \
1045 max_free = root->max_free; \
1050 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \
1054 z = y->right; \
1056 root->left = z; \
1057 y->right = root; \
1058 if (max_free < y->max_free) \
1059 root->max_free = max_free = \
1060 vm_size_max(max_free, z->max_free); \
1061 } else if (max_free < y->max_free) \
1062 root->max_free = max_free = \
1063 vm_size_max(max_free, root->start - y->end);\
1065 y = root->left; \
1067 /* Copy right->max_free. Put root on rlist. */ \
1068 root->max_free = max_free; \
1071 root->left = rlist; \
1081 * Infer root->left->max_free == root->max_free when \
1082 * y->max_free < root->max_free || root->max_free == 0. \
1085 y = root->right; \
1086 max_free = root->max_free; \
1091 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \
1095 z = y->left; \
1097 root->right = z; \
1098 y->left = root; \
1099 if (max_free < y->max_free) \
1100 root->max_free = max_free = \
1101 vm_size_max(max_free, z->max_free); \
1102 } else if (max_free < y->max_free) \
1103 root->max_free = max_free = \
1104 vm_size_max(max_free, y->start - root->end);\
1106 y = root->right; \
1108 /* Copy left->max_free. Put root on llist. */ \
1109 root->max_free = max_free; \
1112 root->right = llist; \
1120 * subtrees with root->max_free < length as empty trees. llist and rlist are
1121 * the two sides in reverse order (bottom-up), with llist linked by the right
1123 * lists terminated by &map->header. This function, and the subsequent call to
1125 * values in &map->header.
1128 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1133 left = right = &map->header;
1134 root = map->root;
1135 while (root != NULL && root->max_free >= length) {
1136 KASSERT(left->end <= root->start &&
1137 root->end <= right->start,
1139 if (addr < root->start) {
1141 y->max_free >= length && addr < y->start);
1142 } else if (addr >= root->end) {
1144 y->max_free >= length && addr >= y->end);
1159 hi = root->right == right ? NULL : root->right;
1174 lo = root->left == left ? NULL : root->left;
1204 * llist->max_free and max_free. Update with the
1207 llist->max_free = max_free =
1208 vm_size_max(llist->max_free, max_free);
1209 vm_map_entry_swap(&llist->right, &tail);
1212 root->left = tail;
1225 max_free = root->start - llist->end;
1230 root->left = header;
1231 header->right = root;
1248 root->left == llist ? root : root->left,
1261 * rlist->max_free and max_free. Update with the
1264 rlist->max_free = max_free =
1265 vm_size_max(rlist->max_free, max_free);
1266 vm_map_entry_swap(&rlist->left, &tail);
1269 root->right = tail;
1282 max_free = rlist->start - root->end;
1287 root->right = header;
1288 header->left = root;
1305 root->right == rlist ? root : root->right,
1314 * The Sleator and Tarjan top-down splay algorithm with the
1315 * following variation. Max_free must be computed bottom-up, so
1325 * child, its right pointer points to its successor. The map header node
1326 * is the predecessor of the first map entry, and the successor of the
1332 * The map must be locked, and leaves it so.
1337 vm_map_splay(vm_map_t map, vm_offset_t addr)
1342 header = &map->header;
1343 root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1353 llist = root->right;
1362 rlist = root->left;
1369 root->max_free = vm_size_max(max_free_left, max_free_right);
1370 map->root = root;
1371 VM_MAP_ASSERT_CONSISTENT(map);
1385 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1391 "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1392 map->nentries, entry);
1393 VM_MAP_ASSERT_LOCKED(map);
1394 map->nentries++;
1395 header = &map->header;
1396 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1400 * map, so it becomes the new root of the map tree.
1404 } else if (entry->start == root->start) {
1409 * the modified map.
1411 KASSERT(entry->end < root->end,
1414 if ((root->eflags & MAP_ENTRY_STACK_GAP) == 0)
1415 root->offset += entry->end - root->start;
1416 root->start = entry->end;
1418 max_free_right = root->max_free = vm_size_max(
1426 * the modified map.
1428 KASSERT(entry->end == root->end,
1431 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
1432 entry->offset += entry->start - root->start;
1433 root->end = entry->start;
1434 max_free_left = root->max_free = vm_size_max(
1439 entry->max_free = vm_size_max(max_free_left, max_free_right);
1440 map->root = entry;
1441 VM_MAP_ASSERT_CONSISTENT(map);
1450 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1456 VM_MAP_ASSERT_LOCKED(map);
1457 header = &map->header;
1458 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1465 rlist->start = root->start;
1466 MPASS((rlist->eflags & MAP_ENTRY_STACK_GAP) == 0);
1467 rlist->offset = root->offset;
1471 llist = root->right;
1476 rlist = root->left;
1480 header->left = header->right = header;
1484 root->max_free = vm_size_max(max_free_left, max_free_right);
1485 map->root = root;
1486 VM_MAP_ASSERT_CONSISTENT(map);
1487 map->nentries--;
1488 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1489 map->nentries, entry);
1498 * The map must be locked, and leaves it so.
1501 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1505 VM_MAP_ASSERT_LOCKED(map);
1506 header = &map->header;
1507 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1510 entry->end += grow_amount;
1511 root->max_free = vm_size_max(
1514 map->root = root;
1515 VM_MAP_ASSERT_CONSISTENT(map);
1516 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1517 __func__, map, map->nentries, entry);
1523 * Finds the map entry containing (or
1525 * in the given map; the entry is returned
1528 * actually contained in the map.
1532 vm_map_t map,
1540 * If the map is empty, then the map entry immediately preceding
1541 * "address" is the map's header.
1543 header = &map->header;
1544 cur = map->root;
1549 if (address >= cur->start && cur->end > address) {
1553 if ((locked = vm_map_locked(map)) ||
1554 sx_try_upgrade(&map->lock)) {
1556 * Splay requires a write lock on the map. However, it only
1558 * change the map. Thus, the map's timestamp need not change
1561 cur = vm_map_splay(map, address);
1563 VM_MAP_UNLOCK_CONSISTENT(map);
1564 sx_downgrade(&map->lock);
1568 * If "address" is contained within a map entry, the new root
1569 * is that map entry. Otherwise, the new root is a map entry
1572 if (address < cur->start) {
1577 return (address < cur->end);
1580 * Since the map is only locked for read access, perform a
1585 if (address < cur->start) {
1587 cur = cur->left;
1590 } else if (cur->end <= address) {
1592 cur = cur->right;
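A short usage sketch for the lookup described above: the function returns true and sets *entry to the containing entry when the address is mapped; otherwise it returns false and *entry points at the preceding entry (possibly the map header).

    vm_map_entry_t entry;

    vm_map_lock_read(map);
    if (vm_map_lookup_entry(map, addr, &entry)) {
            /* addr lies inside [entry->start, entry->end). */
    } else {
            /* addr is unmapped; entry is its predecessor or &map->header. */
    }
    vm_map_unlock_read(map);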
1606 * returns the newly inserted map entry in '*res'. In case the new
1612 vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1623 VM_MAP_ASSERT_LOCKED(map);
1637 if (start == end || !vm_map_range_valid(map, start, end))
1640 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
1648 if (vm_map_lookup_entry(map, start, &prev_entry))
1655 if (next_entry->start < end)
1691 bdry = pagesizes[bidx] - 1;
1702 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1706 object->cred == NULL,
1708 cred = curthread->td_ucred;
1713 if (map == kernel_map && end > kernel_vm_end)
1723 if ((object->flags & OBJ_ANON) != 0) {
1725 if (object->ref_count > 1 || object->shadow_count != 0)
1729 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1732 prev_entry->end == start && (prev_entry->cred == cred ||
1733 (prev_entry->object.vm_object != NULL &&
1734 prev_entry->object.vm_object->cred == cred)) &&
1735 vm_object_coalesce(prev_entry->object.vm_object,
1736 prev_entry->offset,
1737 (vm_size_t)(prev_entry->end - prev_entry->start),
1738 (vm_size_t)(end - prev_entry->end), cred != NULL &&
1742 * can extend the previous map entry to include the
1745 if (prev_entry->inheritance == inheritance &&
1746 prev_entry->protection == prot &&
1747 prev_entry->max_protection == max &&
1748 prev_entry->wired_count == 0) {
1749 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1752 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1753 map->size += end - prev_entry->end;
1754 vm_map_entry_resize(map, prev_entry,
1755 end - prev_entry->end);
1756 *res = vm_map_try_merge_entries(map, prev_entry,
1763 * map entry, we have to create a new map entry. We
1767 object = prev_entry->object.vm_object;
1768 offset = prev_entry->offset +
1769 (prev_entry->end - prev_entry->start);
1771 if (cred != NULL && object != NULL && object->cred != NULL &&
1772 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1783 new_entry = vm_map_entry_create(map);
1784 new_entry->start = start;
1785 new_entry->end = end;
1786 new_entry->cred = NULL;
1788 new_entry->eflags = protoeflags;
1789 new_entry->object.vm_object = object;
1790 new_entry->offset = offset;
1792 new_entry->inheritance = inheritance;
1793 new_entry->protection = prot;
1794 new_entry->max_protection = max;
1795 new_entry->wired_count = 0;
1796 new_entry->wiring_thread = NULL;
1797 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1798 new_entry->next_read = start;
1802 new_entry->cred = cred;
1807 vm_map_entry_link(map, new_entry);
1808 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1809 map->size += new_entry->end - new_entry->start;
1817 vm_map_try_merge_entries(map, prev_entry, new_entry);
1818 *res = vm_map_try_merge_entries(map, new_entry, next_entry);
1821 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1822 end - start, cow & MAP_PREFAULT_PARTIAL);
1831 * Inserts the given VM object into the target map at the
1834 * Requires that the map be locked, and leaves it so.
1836 * If object is non-NULL, ref count must be bumped by caller
1840 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1845 return (vm_map_insert1(map, object, offset, start, end, prot, max,
1852 * Find the first fit (lowest VM address) for "length" free bytes
1853 * beginning at address >= start in the given map.
1861 * The map must be locked, and leaves it so.
1864 * vm_map_max(map)-length+1 if insufficient space.
1867 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1873 VM_MAP_ASSERT_LOCKED(map);
1876 * Request must fit within min/max VM address and must avoid
1879 start = MAX(start, vm_map_min(map));
1880 if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1881 return (vm_map_max(map) - length + 1);
1884 if (map->root == NULL)
1891 * enough; otherwise set gap_end to start, skip gap-checking, and move
1894 header = &map->header;
1895 root = vm_map_splay_split(map, start, length, &llist, &rlist);
1896 gap_end = rlist->start;
1898 start = root->end;
1899 if (root->right != rlist)
1905 rlist = root->left;
1910 llist = root->right;
1914 root->max_free = vm_size_max(max_free_left, max_free_right);
1915 map->root = root;
1916 VM_MAP_ASSERT_CONSISTENT(map);
1917 if (length <= gap_end - start)
1921 if (root->right == header || length > root->right->max_free)
1922 return (vm_map_max(map) - length + 1);
1925 * Splay for the least large-enough gap in the right subtree.
1940 llist = root->right;
1943 root->max_free = vm_size_max(max_free_left,
1947 rlist = y->left;
1948 y->max_free = vm_size_max(
1951 root->max_free = vm_size_max(max_free_left, y->max_free);
1953 map->root = root;
1954 VM_MAP_ASSERT_CONSISTENT(map);
1955 return (root->end);
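Given the return convention documented above, callers detect failure by comparing against the map maximum; the same test appears in the vm_map_find_locked() fragments below. A sketch:

    vm_offset_t addr;

    addr = vm_map_findspace(map, start, length);
    if (addr + length > vm_map_max(map))
            return (KERN_NO_SPACE);         /* no gap large enough */
    /* ... addr is the lowest suitable address; insert the mapping there ... */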
1959 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1968 ("vm_map_fixed: non-NULL backing object for stack"));
1969 vm_map_lock(map);
1970 VM_MAP_RANGE_CHECK(map, start, end);
1972 result = vm_map_delete(map, start, end);
1977 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1980 result = vm_map_insert(map, object, offset, start, end,
1984 vm_map_unlock(map);
2024 * Searches for the specified amount of free space in the given map with the
2025 * specified alignment. Performs an address-ordered, first-fit search from
2032 * The map must be locked. Initially, there must be at least "length" bytes
2036 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2042 VM_MAP_ASSERT_LOCKED(map);
2044 KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
2072 *addr = vm_map_findspace(map, aligned_addr, length);
2073 if (*addr + length > vm_map_max(map) ||
2089 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
2093 *addr = vm_map_findspace(map, *addr, length);
2094 if (*addr + length > vm_map_max(map) ||
2097 return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr,
2103 * map with the given length. The search is defined to be
2104 * first-fit from the specified address; the region found is
2107 * If object is non-NULL, ref count must be bumped by caller
2111 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2118 vm_map_lock(map);
2119 rv = vm_map_find_locked(map, object, offset, addr, length, max_addr,
2121 vm_map_unlock(map);
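A hedged usage sketch of vm_map_find(): with a NULL object and VMFS_ANY_SPACE it reserves anonymous address space at the first fit at or above the hinted *addr, returning a KERN_* status and the chosen address through addr (the VMFS_* placement constants come from <vm/vm_map.h>).

    int rv;

    rv = vm_map_find(map, NULL, 0, &addr, size, 0 /* no upper bound */,
        VMFS_ANY_SPACE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
    if (rv != KERN_SUCCESS)
            return (vm_mmap_to_errno(rv));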
2126 vm_map_find_locked(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2136 ("non-NULL backing object for stack"));
2140 (object->flags & OBJ_COLORED) == 0))
2147 en_aslr = (map->flags & MAP_ASLR) != 0;
2149 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2156 (map->flags & MAP_ASLR_IGNSTART) != 0)
2157 curr_min_addr = min_addr = vm_map_min(map);
2160 curr_min_addr = map->anon_loc;
2178 * perform a first-fit search of the available address
2195 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2196 vm_map_min(map) : min_addr;
2219 pidx--;
2222 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2225 *addr = vm_map_findspace(map, curr_min_addr,
2228 vm_map_max(map))
2235 *addr = vm_map_findspace(map, curr_min_addr, length);
2236 if (*addr + length > vm_map_max(map) ||
2248 (rv = vm_map_alignspace(map, object, offset, addr, length,
2260 if (!vm_map_range_valid(map, *addr, *addr + length))
2262 rv = vm_map_delete(map, *addr, *addr + length);
2267 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2270 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2279 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 ||
2280 *addr < map->anon_loc))
2281 map->anon_loc = *addr;
2298 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2312 rv = vm_map_find(map, object, offset, addr, length, max_addr,
2321 * A map entry with any of the following flags set must not be merged with
2332 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2333 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2336 return (prev->end == entry->start &&
2337 prev->object.vm_object == entry->object.vm_object &&
2338 (prev->object.vm_object == NULL ||
2339 prev->offset + (prev->end - prev->start) == entry->offset) &&
2340 prev->eflags == entry->eflags &&
2341 prev->protection == entry->protection &&
2342 prev->max_protection == entry->max_protection &&
2343 prev->inheritance == entry->inheritance &&
2344 prev->wired_count == entry->wired_count &&
2345 prev->cred == entry->cred);
2349 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2355 * the vnode has additional references. Thus, the map lock can be
2356 * kept without causing a lock-order reversal with the vnode lock.
2359 * object->un_pager.vnp.writemappings, the writemappings value
2362 if (entry->object.vm_object != NULL)
2363 vm_object_deallocate(entry->object.vm_object);
2364 if (entry->cred != NULL)
2365 crfree(entry->cred);
2366 vm_map_entry_dispose(map, entry);
2372 * Compare two map entries that represent consecutive ranges. If
2375 * the map entry that includes the first range.
2377 * The map must be locked.
2380 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2384 VM_MAP_ASSERT_LOCKED(map);
2385 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2387 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2388 vm_map_merged_neighbor_dispose(map, prev_entry);
2397 * Allocate an object to back a map entry.
2404 KASSERT(entry->object.vm_object == NULL,
2405 ("map entry %p has backing object", entry));
2406 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2407 ("map entry %p is a submap", entry));
2408 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2409 entry->cred, entry->end - entry->start);
2410 entry->object.vm_object = object;
2411 entry->offset = 0;
2412 entry->cred = NULL;
2422 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2425 VM_MAP_ASSERT_LOCKED(map);
2426 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2427 ("map entry %p is a submap", entry));
2428 if (entry->object.vm_object == NULL && !vm_map_is_system(map) &&
2429 (entry->eflags & MAP_ENTRY_GUARD) == 0)
2431 else if (entry->object.vm_object != NULL &&
2432 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2433 entry->cred != NULL) {
2434 VM_OBJECT_WLOCK(entry->object.vm_object);
2435 KASSERT(entry->object.vm_object->cred == NULL,
2437 entry->object.vm_object->cred = entry->cred;
2438 entry->object.vm_object->charge = entry->end - entry->start;
2439 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2440 entry->cred = NULL;
2447 * Create a duplicate map entry for clipping.
2450 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2454 VM_MAP_ASSERT_LOCKED(map);
2458 * objects won't be created after the map entry is split.
2460 vm_map_entry_charge_object(map, entry);
2463 new_entry = vm_map_entry_create(map);
2465 if (new_entry->cred != NULL)
2466 crhold(entry->cred);
2467 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2468 vm_object_reference(new_entry->object.vm_object);
2471 * The object->un_pager.vnp.writemappings for the object of
2473 * virtual pages are re-distributed among the clipped entries,
2488 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2493 if (!vm_map_is_system(map))
2495 "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2498 if (startaddr <= entry->start)
2501 VM_MAP_ASSERT_LOCKED(map);
2502 KASSERT(entry->end > startaddr && entry->start < startaddr,
2507 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
2511 new_entry = vm_map_entry_clone(map, entry);
2517 new_entry->end = startaddr;
2518 vm_map_entry_link(map, new_entry);
2530 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2536 if (!vm_map_is_system(map))
2538 "%s: map %p start 0x%jx prev %p", __func__, map,
2541 if (vm_map_lookup_entry(map, start, prev_entry)) {
2543 rv = vm_map_clip_start(map, entry, start);
2561 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2566 if (!vm_map_is_system(map))
2568 "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2571 if (endaddr >= entry->end)
2574 VM_MAP_ASSERT_LOCKED(map);
2575 KASSERT(entry->start < endaddr && entry->end > endaddr,
2580 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
2584 new_entry = vm_map_entry_clone(map, entry);
2590 new_entry->start = endaddr;
2591 vm_map_entry_link(map, new_entry);
2599 * Mark the given range as handled by a subordinate map.
2611 * range from the superior map, and then destroy the
2616 vm_map_t map,
2627 submap->flags |= MAP_IS_SUB_MAP;
2630 vm_map_lock(map);
2631 VM_MAP_RANGE_CHECK(map, start, end);
2632 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2633 (entry->eflags & MAP_ENTRY_COW) == 0 &&
2634 entry->object.vm_object == NULL) {
2635 result = vm_map_clip_start(map, entry, start);
2638 result = vm_map_clip_end(map, entry, end);
2641 entry->object.sub_map = submap;
2642 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2646 vm_map_unlock(map);
2650 submap->flags &= ~MAP_IS_SUB_MAP;
2657 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2664 * Preload the specified map's pmap with mappings to the specified
2665 * object's memory-resident pages. No further physical pages are
2668 * limited number of page mappings are created at the low-end of the
2674 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2684 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2686 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2687 pmap_object_init_pt(map->pmap, addr, object, pindex,
2697 if (psize + pindex > object->size) {
2698 if (pindex >= object->size) {
2702 psize = object->size - pindex;
2716 p != NULL && (tmpidx = p->pindex - pindex) < psize;
2735 for (psind = p->psind; psind > 0; psind--) {
2737 (pagesizes[psind] - 1)) == 0) {
2738 mask = atop(pagesizes[psind]) - 1;
2749 pmap_enter_object(map->pmap, start, addr +
2755 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2766 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
2767 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
2770 old_prot = PROT_EXTRACT(entry->offset);
2772 entry->offset = PROT_MAX(new_maxprot) |
2776 entry->offset = new_prot | PROT_MAX(
2777 PROT_MAX_EXTRACT(entry->offset));
2785 * specified address region in the target map.
2788 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2810 vm_map_lock(map);
2812 if ((map->flags & MAP_WXORX) != 0 &&
2815 vm_map_unlock(map);
2821 * need to fault pages into the map and will drop the map lock while
2822 * doing so, and the VM object may end up in an inconsistent state if we
2823 * update the protection on the map entry in between faults.
2825 vm_map_wait_busy(map);
2827 VM_MAP_RANGE_CHECK(map, start, end);
2829 if (!vm_map_lookup_entry(map, start, &first_entry))
2833 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
2841 while (!CONTAINS_BITS(first_entry->eflags,
2843 first_entry != vm_map_entry_first(map))
2845 start = first_entry->start;
2856 for (entry = first_entry; entry->start < end;
2858 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2859 vm_map_unlock(map);
2862 if ((entry->eflags & (MAP_ENTRY_GUARD |
2865 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ?
2866 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection;
2868 vm_map_unlock(map);
2871 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2876 * Postpone the operation until all in-transition map entries have
2877 * stabilized. An in-transition entry might already have its pages
2883 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2884 vm_map_unlock_and_wait(map, 0);
2890 * private (i.e., copy-on-write) mappings that are transitioning from
2891 * read-only to read/write access. If a reservation fails, break out
2895 rv = vm_map_clip_start(map, first_entry, start);
2897 vm_map_unlock(map);
2900 for (entry = first_entry; entry->start < end;
2902 rv = vm_map_clip_end(map, entry, end);
2904 vm_map_unlock(map);
2909 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2911 (entry->eflags & MAP_ENTRY_GUARD) != 0)
2914 cred = curthread->td_ucred;
2915 obj = entry->object.vm_object;
2918 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2919 if (!swap_reserve(entry->end - entry->start)) {
2921 end = entry->end;
2925 entry->cred = cred;
2930 if ((obj->flags & OBJ_SWAP) == 0) {
2937 * we cannot distinguish between non-charged and
2940 KASSERT(obj->charge == 0,
2943 if (!swap_reserve(ptoa(obj->size))) {
2946 end = entry->end;
2951 obj->cred = cred;
2952 obj->charge = ptoa(obj->size);
2962 entry->start < end;
2963 vm_map_try_merge_entries(map, prev_entry, entry),
2968 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
2974 old_prot = entry->protection;
2977 entry->max_protection = new_maxprot;
2978 entry->protection = new_maxprot & old_prot;
2981 entry->protection = new_prot;
2984 * For user wired map entries, the normal lazy evaluation of
2987 * copy-on-write and enable write access in the physical map.
2989 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2990 (entry->protection & VM_PROT_WRITE) != 0 &&
2992 vm_fault_copy_entry(map, map, entry, entry, NULL);
2995 * When restricting access, update the physical map. Worry
2996 * about copy-on-write here.
2998 if ((old_prot & ~entry->protection) != 0) {
2999 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
3001 pmap_protect(map->pmap, entry->start,
3002 entry->end,
3003 entry->protection & MASK(entry));
3007 vm_map_try_merge_entries(map, prev_entry, entry);
3008 vm_map_unlock(map);
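For reference, a hedged sketch of a caller of this routine as it exists in this version of the file, where the new protection and the new maximum protection are passed separately and selected by flags; VM_MAP_PROTECT_SET_PROT is my assumption about what the header calls the "set protection" flag bit.

    int rv;

    /* Make [start, end) read-only without touching max_protection. */
    rv = vm_map_protect(map, start, end, VM_PROT_READ, 0,
        VM_MAP_PROTECT_SET_PROT);
    if (rv != KERN_SUCCESS)
            return (vm_mmap_to_errno(rv));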
3015 * This routine traverses a process's map, handling the madvise
3022 vm_map_t map,
3033 * we need to use an exclusive lock on the map and we need to perform
3034 * various clipping operations. Otherwise we only need a read-lock
3035 * on the map.
3048 vm_map_lock(map);
3056 vm_map_lock_read(map);
3065 VM_MAP_RANGE_CHECK(map, start, end);
3074 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
3076 vm_map_unlock(map);
3080 for (; entry->start < end; prev_entry = entry,
3082 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
3085 rv = vm_map_clip_end(map, entry, end);
3087 vm_map_unlock(map);
3105 entry->eflags |= MAP_ENTRY_NOSYNC;
3108 entry->eflags &= ~MAP_ENTRY_NOSYNC;
3111 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
3114 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
3119 vm_map_try_merge_entries(map, prev_entry, entry);
3121 vm_map_try_merge_entries(map, prev_entry, entry);
3122 vm_map_unlock(map);
3133 if (!vm_map_lookup_entry(map, start, &entry))
3135 for (; entry->start < end;
3139 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP |
3146 * we hold the VM map read-locked, neither the
3151 entry->object.vm_object != NULL &&
3152 entry->object.vm_object->backing_object != NULL)
3155 pstart = OFF_TO_IDX(entry->offset);
3156 pend = pstart + atop(entry->end - entry->start);
3157 useStart = entry->start;
3158 useEnd = entry->end;
3160 if (entry->start < start) {
3161 pstart += atop(start - entry->start);
3164 if (entry->end > end) {
3165 pend -= atop(entry->end - end);
3183 pmap_advise(map->pmap, useStart, useEnd,
3186 vm_object_madvise(entry->object.vm_object, pstart,
3190 * Pre-populate paging structures in the
3195 entry->wired_count == 0) {
3196 vm_map_pmap_enter(map,
3198 entry->protection,
3199 entry->object.vm_object,
3201 ptoa(pend - pstart),
3206 vm_map_unlock_read(map);
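A brief usage sketch matching the split above: advice that changes map entry flags (e.g. MADV_NOSYNC) runs under the write lock, while object and pmap advice (e.g. MADV_WILLNEED) runs under the read lock; callers just pass the behavior code and, as I read sys_madvise(), get back an errno-style value.

    int error;

    error = vm_map_madvise(map, start, end, MADV_WILLNEED);
    if (error != 0)
            return (error);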
3215 * range in the target map. Inheritance
3216 * affects how the map will be shared with
3220 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
3237 vm_map_lock(map);
3238 VM_MAP_RANGE_CHECK(map, start, end);
3239 rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry);
3242 if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3243 rv = vm_map_clip_end(map, lentry, end);
3248 for (entry = start_entry; entry->start < end;
3250 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3257 for (entry = start_entry; entry->start < end; prev_entry = entry,
3259 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3260 entry, (uintmax_t)entry->end, (uintmax_t)end));
3261 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3263 entry->inheritance = new_inheritance;
3264 vm_map_try_merge_entries(map, prev_entry, entry);
3266 vm_map_try_merge_entries(map, prev_entry, entry);
3268 vm_map_unlock(map);
3275 * Release the map lock, and sleep until the entry is no longer in
3276 * transition. Wake up and reacquire the map lock. If the map changed while
3277 * another held the lock, look up a possibly-changed entry at or after the
3281 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3288 VM_MAP_ASSERT_LOCKED(map);
3289 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3290     ("not in-transition map entry %p", in_entry));
3294 start = MAX(in_start, in_entry->start);
3295 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3296 last_timestamp = map->timestamp;
3297 if (vm_map_unlock_and_wait(map, 0)) {
3302 vm_map_lock(map);
3303 if (last_timestamp + 1 == map->timestamp)
3307 * Look again for the entry because the map was modified while it was
3311 if (!vm_map_lookup_entry(map, start, &entry)) {
3327 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3338 vm_map_lock(map);
3339 VM_MAP_RANGE_CHECK(map, start, end);
3340 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3344 vm_map_unlock(map);
3349 for (entry = first_entry; entry->start < end; entry = next_entry) {
3350 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3354 next_entry = vm_map_entry_in_transition(map, start,
3358 vm_map_unlock(map);
3368 rv = vm_map_clip_start(map, entry, start);
3371 rv = vm_map_clip_end(map, entry, end);
3376 * Mark the entry in case the map lock is released. (See
3379 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3380 entry->wiring_thread == NULL,
3381 ("owned map entry %p", entry));
3382 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3383 entry->wiring_thread = curthread;
3386 * Check the map for holes in the specified region.
3390 entry->end < end && next_entry->start > entry->end) {
3391 end = entry->end;
3400 end = entry->end;
3407 !vm_map_lookup_entry(map, start, &first_entry)) {
3415 for (; entry->start < end;
3420 * while the map lock was dropped for draining
3426 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3427 entry->wiring_thread != curthread) {
3434 (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3435 if (entry->wired_count == 1)
3436 vm_map_entry_unwire(map, entry);
3438 entry->wired_count--;
3440 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3442 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3443 ("vm_map_unwire: in-transition flag missing %p", entry));
3444 KASSERT(entry->wiring_thread == curthread,
3446 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3447 entry->wiring_thread = NULL;
3448 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3449 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3452 vm_map_try_merge_entries(map, prev_entry, entry);
3454 vm_map_try_merge_entries(map, prev_entry, entry);
3455 vm_map_unlock(map);
3457 vm_map_wakeup(map);
3488 * The map should be locked.
3491 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3495 VM_MAP_ASSERT_LOCKED(map);
3496 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3497 entry->wired_count == 1,
3499 KASSERT(failed_addr < entry->end,
3506 if (failed_addr > entry->start) {
3507 pmap_unwire(map->pmap, entry->start, failed_addr);
3508 vm_object_unwire(entry->object.vm_object, entry->offset,
3509 failed_addr - entry->start, PQ_ACTIVE);
3513 * Assign an out-of-range value to represent the failure to wire this
3516 entry->wired_count = -1;
3520 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3524 vm_map_lock(map);
3525 rv = vm_map_wire_locked(map, start, end, flags);
3526 vm_map_unlock(map);
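A hedged sketch of a typical mlock(2)-style caller of this wrapper; VM_MAP_WIRE_USER and VM_MAP_WIRE_NOHOLES are the standard flag names from <vm/vm_map.h>.

    int rv;

    rv = vm_map_wire(map, start, end,
        VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
    if (rv != KERN_SUCCESS)
            return (ENOMEM);        /* e.g. a hole or a wiring limit was hit */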
3533 * Implements both kernel and user wiring. Returns with the map locked;
3534 * the map lock may be dropped and reacquired during the operation.
3537 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3547 VM_MAP_ASSERT_LOCKED(map);
3556 VM_MAP_RANGE_CHECK(map, start, end);
3557 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3563 for (entry = first_entry; entry->start < end; entry = next_entry) {
3564 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3568 next_entry = vm_map_entry_in_transition(map, start,
3580 rv = vm_map_clip_start(map, entry, start);
3583 rv = vm_map_clip_end(map, entry, end);
3588 * Mark the entry in case the map lock is released. (See
3591 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3592 entry->wiring_thread == NULL,
3593 ("owned map entry %p", entry));
3594 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3595 entry->wiring_thread = curthread;
3596 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3597 || (entry->protection & prot) != prot) {
3598 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3600 end = entry->end;
3604 } else if (entry->wired_count == 0) {
3605 entry->wired_count++;
3607 npages = atop(entry->end - entry->start);
3609 vm_map_wire_entry_failure(map, entry,
3610 entry->start);
3611 end = entry->end;
3617 * Release the map lock, relying on the in-transition
3618 * mark. Mark the map busy for fork.
3620 saved_start = entry->start;
3621 saved_end = entry->end;
3622 last_timestamp = map->timestamp;
3625 vm_map_busy(map);
3626 vm_map_unlock(map);
3632 * it into the physical map.
3634 rv = vm_fault(map, faddr, VM_PROT_NONE,
3639 vm_map_lock(map);
3640 vm_map_unbusy(map);
3641 if (last_timestamp + 1 != map->timestamp) {
3643 * Look again for the entry because the map was
3648 if (!vm_map_lookup_entry(map, saved_start,
3654 for (entry = next_entry; entry->end < saved_end;
3663 faddr < entry->end)
3664 vm_map_wire_entry_failure(map,
3669 vm_map_wire_entry_failure(map, entry, faddr);
3672 end = entry->end;
3676 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3677 entry->wired_count++;
3680 * Check the map for holes in the specified region.
3685 entry->end < end && next_entry->start > entry->end) {
3686 end = entry->end;
3695 !vm_map_lookup_entry(map, start, &first_entry)) {
3703 for (; entry->start < end;
3708 * while the map lock was dropped for faulting in the
3718 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3719 entry->wiring_thread != curthread) {
3725 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3729 entry->eflags |= MAP_ENTRY_USER_WIRED;
3730 } else if (entry->wired_count == -1) {
3735 entry->wired_count = 0;
3737 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3742 if (entry->wired_count == 1) {
3743 vm_map_entry_unwire(map, entry);
3746 atop(entry->end - entry->start));
3748 entry->wired_count--;
3750 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3751 ("vm_map_wire: in-transition flag missing %p", entry));
3752 KASSERT(entry->wiring_thread == curthread,
3754 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3756 entry->wiring_thread = NULL;
3757 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3758 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3761 vm_map_try_merge_entries(map, prev_entry, entry);
3763 vm_map_try_merge_entries(map, prev_entry, entry);
3765 vm_map_wakeup(map);
3787 vm_map_t map,
3801 vm_map_lock_read(map);
3802 VM_MAP_RANGE_CHECK(map, start, end);
3803 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3804 vm_map_unlock_read(map);
3807 start = first_entry->start;
3808 end = first_entry->end;
3812 * Make a first pass to check for user-wired memory, holes,
3815 for (entry = first_entry; entry->start < end; entry = next_entry) {
3817 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3818 vm_map_unlock_read(map);
3823 ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
3824 (end & (pagesizes[bdry_idx] - 1)) != 0)) {
3825 vm_map_unlock_read(map);
3830 if (end > entry->end &&
3831 entry->end != next_entry->start) {
3832 vm_map_unlock_read(map);
3838 pmap_remove(map->pmap, start, end);
3845 for (entry = first_entry; entry->start < end;) {
3846 offset = entry->offset + (start - entry->start);
3847 size = (end <= entry->end ? end : entry->end) - start;
3848 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3853 smap = entry->object.sub_map;
3856 tsize = tentry->end - offset;
3859 object = tentry->object.vm_object;
3860 offset = tentry->offset + (offset - tentry->start);
3863 object = entry->object.vm_object;
3866 last_timestamp = map->timestamp;
3867 vm_map_unlock_read(map);
3872 vm_map_lock_read(map);
3873 if (last_timestamp == map->timestamp ||
3874 !vm_map_lookup_entry(map, start, &entry))
3878 vm_map_unlock_read(map);
3887 * The map in question should be locked.
3891 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3895 VM_MAP_ASSERT_LOCKED(map);
3896 KASSERT(entry->wired_count > 0,
3899 size = entry->end - entry->start;
3900 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3902 pmap_unwire(map->pmap, entry->start, entry->end);
3903 vm_object_unwire(entry->object.vm_object, entry->offset, size,
3905 entry->wired_count = 0;
3912 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3913 vm_object_deallocate(entry->object.vm_object);
3920 * Deallocate the given entry from the target map.
3923 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3929 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3930 object = entry->object.vm_object;
3932 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3933 MPASS(entry->cred == NULL);
3934 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3936 vm_map_entry_deallocate(entry, vm_map_is_system(map));
3940 size = entry->end - entry->start;
3941 map->size -= size;
3943 if (entry->cred != NULL) {
3944 swap_release_by_cred(size, entry->cred);
3945 crfree(entry->cred);
3948 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3949 entry->object.vm_object = NULL;
3950 } else if ((object->flags & OBJ_ANON) != 0 ||
3952 KASSERT(entry->cred == NULL || object->cred == NULL ||
3953 (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3955 offidxstart = OFF_TO_IDX(entry->offset);
3958 if (object->ref_count != 1 &&
3959 ((object->flags & OBJ_ONEMAPPING) != 0 ||
3971 if (offidxend >= object->size &&
3972 offidxstart < object->size) {
3973 size1 = object->size;
3974 object->size = offidxstart;
3975 if (object->cred != NULL) {
3976 size1 -= object->size;
3977 KASSERT(object->charge >= ptoa(size1),
3980 object->cred);
3981 object->charge -= ptoa(size1);
3987 if (vm_map_is_system(map))
3990 entry->defer_next = curthread->td_map_def_user;
3991 curthread->td_map_def_user = entry;
3999 * map.
4002 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
4007 VM_MAP_ASSERT_LOCKED(map);
4016 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
4019 for (; entry->start < end; entry = next_entry) {
4025 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
4026 (vm_map_pmap(map) != kernel_pmap &&
4031 saved_start = entry->start;
4032 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4033 last_timestamp = map->timestamp;
4034 (void) vm_map_unlock_and_wait(map, 0);
4035 vm_map_lock(map);
4036 if (last_timestamp + 1 != map->timestamp) {
4038 * Look again for the entry because the map was
4043 rv = vm_map_lookup_clip_start(map, saved_start,
4053 rv = vm_map_clip_end(map, entry, end);
4062 if (entry->wired_count != 0)
4063 vm_map_entry_unwire(map, entry);
4070 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
4071 entry->object.vm_object != NULL)
4072 pmap_map_delete(map->pmap, entry->start, entry->end);
4080 vm_map_entry_delete(map, entry);
4088 * Remove the given address range from the target map.
4092 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
4096 vm_map_lock(map);
4097 VM_MAP_RANGE_CHECK(map, start, end);
4098 result = vm_map_delete(map, start, end);
4099 vm_map_unlock(map);
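A short sketch of a munmap(2)-style use of this wrapper: the caller supplies page-aligned bounds and gets a KERN_* status back, and the lock / range-check / delete / unlock sequence above does the rest.

    int rv;

    rv = vm_map_remove(map, trunc_page(addr), round_page(addr + size));
    if (rv != KERN_SUCCESS)
            return (vm_mmap_to_errno(rv));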
4106 * Assert that the target map allows the specified privilege on the
4115 * The map must be locked. A read lock is sufficient.
4118 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
4124 if (!vm_map_lookup_entry(map, start, &tmp_entry))
4132 if (start < entry->start)
4137 if ((entry->protection & protection) != protection)
4140 start = entry->end;
4150 * Copies a swap-backed object from an existing map entry to a
4162 src_object = src_entry->object.vm_object;
4164 if ((src_object->flags & OBJ_ANON) != 0) {
4167 if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
4169 src_object = src_entry->object.vm_object;
4176 if (src_entry->cred != NULL &&
4177 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4178 KASSERT(src_object->cred == NULL,
4181 src_object->cred = src_entry->cred;
4182 src_object->charge = size;
4184 dst_entry->object.vm_object = src_object;
4186 cred = curthread->td_ucred;
4188 dst_entry->cred = cred;
4190 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4192 src_entry->cred = cred;
4218 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
4221 if (src_entry->wired_count == 0 ||
4222 (src_entry->protection & VM_PROT_WRITE) == 0) {
4225 * write-protected.
4227 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
4228 (src_entry->protection & VM_PROT_WRITE) != 0) {
4229 pmap_protect(src_map->pmap,
4230 src_entry->start,
4231 src_entry->end,
4232 src_entry->protection & ~VM_PROT_WRITE);
4238 size = src_entry->end - src_entry->start;
4239 if ((src_object = src_entry->object.vm_object) != NULL) {
4240 if ((src_object->flags & OBJ_SWAP) != 0) {
4244 src_object = src_entry->object.vm_object;
4247 dst_entry->object.vm_object = src_object;
4249 src_entry->eflags |= MAP_ENTRY_COW |
4251 dst_entry->eflags |= MAP_ENTRY_COW |
4253 dst_entry->offset = src_entry->offset;
4254 if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
4261 * decrement object->un_pager writecount
4266 fake_entry->eflags = MAP_ENTRY_WRITECNT;
4267 src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
4269 fake_entry->object.vm_object = src_object;
4270 fake_entry->start = src_entry->start;
4271 fake_entry->end = src_entry->end;
4272 fake_entry->defer_next =
4273 curthread->td_map_def_user;
4274 curthread->td_map_def_user = fake_entry;
4277 pmap_copy(dst_map->pmap, src_map->pmap,
4278 dst_entry->start, dst_entry->end - dst_entry->start,
4279 src_entry->start);
4281 dst_entry->object.vm_object = NULL;
4282 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0)
4283 dst_entry->offset = 0;
4284 if (src_entry->cred != NULL) {
4285 dst_entry->cred = curthread->td_ucred;
4286 crhold(dst_entry->cred);
4292 * We don't want to make writeable wired pages copy-on-write.
4293 * Immediately copy these pages into the new map by simulating
4303 * Update the newly-forked vmspace each time a map entry is inherited
4305 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4314 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4316 entrysize = entry->end - entry->start;
4317 vm2->vm_map.size += entrysize;
4318 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
4319 vm2->vm_ssize += btoc(entrysize);
4320 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4321 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4322 newend = MIN(entry->end,
4323 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4324 vm2->vm_dsize += btoc(newend - entry->start);
4325 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4326 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4327 newend = MIN(entry->end,
4328 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4329 vm2->vm_tsize += btoc(newend - entry->start);
4336 * based on those of an existing process. The new map
4337 * is based on the old map, according to the inheritance
4338 * values on the regions in that map.
4342 * The source map must not be locked.
4354 old_map = &vm1->vm_map;
4361 vm2->vm_taddr = vm1->vm_taddr;
4362 vm2->vm_daddr = vm1->vm_daddr;
4363 vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4364 vm2->vm_stacktop = vm1->vm_stacktop;
4365 vm2->vm_shp_base = vm1->vm_shp_base;
4367 if (old_map->busy)
4369 new_map = &vm2->vm_map;
4373 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4375 sx_xunlock(&old_map->lock);
4376 sx_xunlock(&new_map->lock);
4382 new_map->anon_loc = old_map->anon_loc;
4383 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART |
4387 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4390 inh = old_entry->inheritance;
4391 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4404 object = old_entry->object.vm_object;
4407 object = old_entry->object.vm_object;
4415 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4416 vm_object_shadow(&old_entry->object.vm_object,
4417 &old_entry->offset,
4418 old_entry->end - old_entry->start,
4419 old_entry->cred,
4422 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4423 old_entry->cred = NULL;
4431 object = old_entry->object.vm_object;
4435 if (old_entry->cred != NULL) {
4436 KASSERT(object->cred == NULL,
4438 object->cred = old_entry->cred;
4439 object->charge = old_entry->end -
4440 old_entry->start;
4441 old_entry->cred = NULL;
4450 if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4451 object->type == OBJT_VNODE) {
4452 KASSERT(((struct vnode *)object->
4453 handle)->v_writecount > 0,
4456 KASSERT(object->un_pager.vnp.
4469 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4471 new_entry->wiring_thread = NULL;
4472 new_entry->wired_count = 0;
4473 if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4475 new_entry->start, new_entry->end);
4480 * Insert the entry into the new map -- we know we're
4481 * inserting at the end of the new map.
4487 * Update the physical map
4489 pmap_copy(new_map->pmap, old_map->pmap,
4490 new_entry->start,
4491 (old_entry->end - old_entry->start),
4492 old_entry->start);
4497 * Clone the entry and link into the map.
4504 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4506 new_entry->wiring_thread = NULL;
4507 new_entry->wired_count = 0;
4508 new_entry->object.vm_object = NULL;
4509 new_entry->cred = NULL;
4525 new_entry->start = old_entry->start;
4526 new_entry->end = old_entry->end;
4527 new_entry->eflags = old_entry->eflags &
4531 new_entry->protection = old_entry->protection;
4532 new_entry->max_protection = old_entry->max_protection;
4533 new_entry->inheritance = VM_INHERIT_ZERO;
4538 new_entry->cred = curthread->td_ucred;
4539 crhold(new_entry->cred);
4540 *fork_charge += (new_entry->end - new_entry->start);
4547 * map entries, which cannot be done until both old_map and
4550 sx_xunlock(&old_map->lock);
4551 sx_xunlock(&new_map->lock);
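/*
 * Illustrative sketch (hypothetical names, not the kernel API) of the
 * per-entry decision the fork loop above makes: the entry's inheritance
 * value selects whether the child skips the range, shares the parent's VM
 * object, gets a copy-on-write view, or gets a fresh zero-filled mapping
 * charged to the forking credential.
 */
enum inherit_sk { INH_NONE_SK, INH_SHARE_SK, INH_COPY_SK, INH_ZERO_SK };

enum fork_action_sk {
	FORK_SKIP_SK,		/* child gets no mapping for this range */
	FORK_SHARE_OBJECT_SK,	/* child entry references the same object */
	FORK_MARK_COW_SK,	/* both entries become copy-on-write; wired
				   pages are instead copied eagerly */
	FORK_FRESH_ANON_SK	/* new entry with no backing object yet */
};

static enum fork_action_sk
fork_entry_action_sk(enum inherit_sk inh)
{
	switch (inh) {
	case INH_SHARE_SK:
		return (FORK_SHARE_OBJECT_SK);
	case INH_COPY_SK:
		return (FORK_MARK_COW_SK);
	case INH_ZERO_SK:
		return (FORK_FRESH_ANON_SK);
	default:
		return (FORK_SKIP_SK);
	}
}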
4562 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4569 MPASS((map->flags & MAP_WIREFUTURE) == 0);
4572 vm_map_lock(map);
4575 if (map->size + init_ssize > vmemlim) {
4579 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4582 vm_map_unlock(map);
4592 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4604 !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4606 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4607 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4614 init_ssize = max_ssize - sgp;
4617 if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4623 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4627 * We initially map a stack of only init_ssize, at the top of
4634 bot = addrbos + max_ssize - init_ssize;
4638 rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow,
4642 KASSERT(new_entry->end == top || new_entry->start == bot,
4644 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4648 rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4652 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0,
4653 ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
4654 KASSERT((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0,
4656 gap_entry->eflags));
4660 * read-ahead logic is never used for it. Re-use
4667 gap_entry->next_read = sgp;
4668 gap_entry->offset = prot | PROT_MAX(max);
4670 (void)vm_map_delete(map, bot, top);
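/*
 * Sketch of the address arithmetic above for a grows-down stack reservation
 * (plain userland C, hypothetical names): the initially mapped pages sit at
 * the top of [addrbos, addrbos + max_ssize), and everything below them
 * becomes the guard/gap entry that later growth consumes.
 */
#include <stddef.h>
#include <stdint.h>

struct stack_layout_sk {
	uintptr_t	gap_bot, gap_top;	/* unmapped gap/guard entry */
	uintptr_t	bot, top;		/* initially mapped stack pages */
};

static struct stack_layout_sk
stack_layout_compute_sk(uintptr_t addrbos, size_t max_ssize,
    size_t init_ssize, size_t guard_bytes)
{
	struct stack_layout_sk l;

	/* Never map so much up front that the guard could not fit. */
	if (init_ssize + guard_bytes > max_ssize)
		init_ssize = max_ssize - guard_bytes;

	l.top = addrbos + max_ssize;
	l.bot = l.top - init_ssize;
	l.gap_bot = addrbos;
	l.gap_top = l.bot;
	return (l);
}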
4676 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
4677 * successfully grow the stack.
4680 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4684 struct vmspace *vm;
4699 vm = p->p_vmspace;
4706 if (p != initproc && (map != &p->p_vmspace->vm_map ||
4707 p->p_textvp == NULL))
4710 MPASS(!vm_map_is_system(map));
4717 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4719 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4721 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0) {
4723 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4724 stack_entry->start != gap_entry->end)
4726 grow_amount = round_page(stack_entry->start - addr);
4730 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4731 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4732 gap_entry->next_read;
4733 max_grow = gap_entry->end - gap_entry->start;
4736 max_grow -= guard;
4744 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4745 addr < (vm_offset_t)vm->vm_stacktop;
4746 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4753 ctob(vm->vm_ssize) + grow_amount)) {
4764 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4765 grow_amount = trunc_page((vm_size_t)stacklim) -
4766 ctob(vm->vm_ssize);
4773 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4774 grow_amount = limit - ctob(vm->vm_ssize);
4777 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4778 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4786 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4797 if (map->size + grow_amount > vmemlim) {
4804 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4813 if (vm_map_lock_upgrade(map)) {
4815 vm_map_lock_read(map);
4823 prot = PROT_EXTRACT(gap_entry->offset);
4824 max = PROT_MAX_EXTRACT(gap_entry->offset);
4825 sgp = gap_entry->next_read;
4827 grow_start = gap_entry->end - grow_amount;
4828 if (gap_entry->start + grow_amount == gap_entry->end) {
4829 gap_start = gap_entry->start;
4830 gap_end = gap_entry->end;
4831 vm_map_entry_delete(map, gap_entry);
4834 MPASS(gap_entry->start < gap_entry->end - grow_amount);
4835 vm_map_entry_resize(map, gap_entry, -grow_amount);
4838 rv = vm_map_insert(map, NULL, 0, grow_start,
4842 rv1 = vm_map_insert1(map, NULL, 0, gap_start,
4847 gap_entry->next_read = sgp;
4848 gap_entry->offset = prot | PROT_MAX(max);
4850 vm_map_entry_resize(map, gap_entry,
4855 vm->vm_ssize += btoc(grow_amount);
4860 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4861 rv = vm_map_wire_locked(map, grow_start,
4865 vm_map_lock_downgrade(map);
4871 error = racct_set(p, RACCT_VMEM, map->size);
4875 ptoa(pmap_wired_count(map->pmap)));
4878 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
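/*
 * Sketch (hypothetical names, userland C) of the growth arithmetic above:
 * the faulting address determines how much new stack to map, the request is
 * clamped so the guard region at the bottom of the gap survives, and the
 * new pages are carved from the top of the gap entry.  When the returned
 * gap end equals gap_start, the gap entry disappears entirely.
 */
#include <stddef.h>
#include <stdint.h>

#define	PAGE_MASK_SK	(4096UL - 1)
#define	ROUND_PAGE_SK(x) (((x) + PAGE_MASK_SK) & ~PAGE_MASK_SK)

static int
growstack_sk(uintptr_t gap_start, uintptr_t gap_end, size_t guard,
    uintptr_t fault_addr, uintptr_t *new_stack_bot, uintptr_t *new_gap_end)
{
	size_t grow_amount, max_grow;

	if (guard >= gap_end - gap_start)
		return (-1);		/* nothing left to grow into */
	/* How far below the current stack bottom did the fault land? */
	grow_amount = ROUND_PAGE_SK(gap_end - fault_addr);
	max_grow = (gap_end - gap_start) - guard;
	if (grow_amount > max_grow)
		return (-1);		/* would run into the guard pages */

	*new_stack_bot = gap_end - grow_amount;	/* new mapping grows down */
	*new_gap_end = gap_end - grow_amount;	/* the gap shrinks to match */
	return (0);
}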
4888 * Unshare the specified VM space for exec. If other processes are
4894 struct vmspace *oldvmspace = p->p_vmspace;
4897 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4902 newvmspace->vm_swrss = oldvmspace->vm_swrss;
4911 p->p_vmspace = newvmspace;
4913 if (p == curthread->td_proc)
4915 curthread->td_pflags |= TDP_EXECVMSPC;
4920 * Unshare the specified VM space for forcing COW.  This
4921 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4926 struct vmspace *oldvmspace = p->p_vmspace;
4932 * cannot concurrently transition 1 -> 2.
4934 if (refcount_load(&oldvmspace->vm_refcnt) == 1)
4940 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4945 p->p_vmspace = newvmspace;
4947 if (p == curthread->td_proc)
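/*
 * Minimal sketch of the check visible above: with a single reference on the
 * vmspace there can be no sharing, so the expensive copy is skipped.
 * Userland model using C11 atomics in place of the kernel's refcount(9).
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
vmspace_is_shared_sk(atomic_uint *refcnt)
{
	/*
	 * A count of 1 cannot concurrently become 2 here, because only the
	 * owning process itself could create that second reference.
	 */
	return (atomic_load(refcnt) > 1);
}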
4956 * Finds the VM object, offset, and
4958 * specified map, assuming a page fault of the
4961 * Leaves the map in question locked for read; return
4963 * call is performed. Note that the map argument
4964 * is in/out; the returned map must be used in
4971 * specified, the map may be changed to perform virtual
4986 vm_map_t map = *var_map;
4995 vm_map_lock_read(map);
5001 if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
5002 vm_map_unlock_read(map);
5011 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5012 vm_map_t old_map = map;
5014 *var_map = map = entry->object.sub_map;
5022 prot = entry->protection;
5025 if (prot == VM_PROT_NONE && map != kernel_map &&
5026 (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
5027 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 &&
5028 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
5033 vm_map_unlock_read(map);
5036 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
5039 ("entry %p flags %x", entry, entry->eflags));
5041 (entry->max_protection & VM_PROT_WRITE) == 0 &&
5042 (entry->eflags & MAP_ENTRY_COW) == 0) {
5043 vm_map_unlock_read(map);
5051 *wired = (entry->wired_count != 0);
5053 fault_type = entry->protection;
5054 size = entry->end - entry->start;
5057 * If the entry was copy-on-write, we either ...
5059 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5062 * now since we've got the map locked.
5072 * -- one just moved from the map to the new
5075 if (vm_map_lock_upgrade(map))
5078 if (entry->cred == NULL) {
5083 cred = curthread->td_ucred;
5087 vm_map_unlock(map);
5090 entry->cred = cred;
5092 eobject = entry->object.vm_object;
5093 vm_object_shadow(&entry->object.vm_object,
5094 &entry->offset, size, entry->cred, false);
5095 if (eobject == entry->object.vm_object) {
5099 swap_release_by_cred(size, entry->cred);
5100 crfree(entry->cred);
5102 entry->cred = NULL;
5103 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
5105 vm_map_lock_downgrade(map);
5108 * We're attempting to read a copy-on-write page --
5118 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) {
5119 if (vm_map_lock_upgrade(map))
5121 entry->object.vm_object = vm_object_allocate_anon(atop(size),
5122 NULL, entry->cred, size);
5123 entry->offset = 0;
5124 entry->cred = NULL;
5125 vm_map_lock_downgrade(map);
5130 * copy-on-write or empty, it has been fixed up.
5132 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5133 *object = entry->object.vm_object;
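/*
 * Sketch (hypothetical names) of the fix-ups the lookup above performs
 * before returning an object to the fault handler: a write fault on a
 * needs-copy entry pushes a shadow object charged to the faulting
 * credential, a read fault merely loses write permission, and an entry
 * with no backing object gets anonymous memory allocated on the spot.
 */
#include <stdbool.h>

enum lookup_fixup_sk {
	FIXUP_NONE_SK,		/* use the existing object as-is */
	FIXUP_SHADOW_SK,	/* write to a COW entry: push a shadow object */
	FIXUP_DEMOTE_READ_SK,	/* read of a COW entry: drop write permission */
	FIXUP_ALLOC_ANON_SK	/* no object yet: allocate anonymous memory */
};

static enum lookup_fixup_sk
lookup_fixup_sk(bool needs_copy, bool has_object, bool write_fault)
{
	if (needs_copy)
		return (write_fault ? FIXUP_SHADOW_SK : FIXUP_DEMOTE_READ_SK);
	if (!has_object)
		return (FIXUP_ALLOC_ANON_SK);
	return (FIXUP_NONE_SK);
}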
5143 * KERN_FAILURE instead of blocking on map lock or memory allocation.
5156 vm_map_t map = *var_map;
5163 if (!vm_map_lookup_entry(map, vaddr, out_entry))
5171 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5177 prot = entry->protection;
5186 *wired = (entry->wired_count != 0);
5188 fault_type = entry->protection;
5190 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5192 * Fail if the entry was copy-on-write for a write fault.
5197 * We're attempting to read a copy-on-write page --
5206 if (entry->object.vm_object == NULL && !vm_map_is_system(map))
5211 * copy-on-write or empty, it has been fixed up.
5213 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5214 *object = entry->object.vm_object;
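/*
 * The non-blocking variant above refuses to perform either fix-up.  Sketch
 * of that policy, with the same hypothetical flags as the previous example:
 * anything that would need the exclusive map lock or a memory allocation is
 * reported as a failure instead.
 */
#include <stdbool.h>

static bool
lookup_locked_would_fail_sk(bool needs_copy, bool has_object,
    bool write_fault)
{
	if (needs_copy && write_fault)
		return (true);		/* shadowing needs the write lock */
	if (!has_object)
		return (true);		/* would have to allocate an object */
	return (false);
}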
5227 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5230 * Unlock the main-level map
5232 vm_map_unlock_read(map);
5236 vm_map_max_KBI(const struct vm_map *map)
5239 return (vm_map_max(map));
5243 vm_map_min_KBI(const struct vm_map *map)
5246 return (vm_map_min(map));
5250 vm_map_pmap_KBI(vm_map_t map)
5253 return (map->pmap);
5257 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
5260 return (vm_map_range_valid(map, start, end));
5265 _vm_map_assert_consistent(vm_map_t map, int check)
5272 ++map->nupdates;
5277 header = prev = &map->header;
5278 VM_MAP_ENTRY_FOREACH(entry, map) {
5279 KASSERT(prev->end <= entry->start,
5280 ("map %p prev->end = %jx, start = %jx", map,
5281 (uintmax_t)prev->end, (uintmax_t)entry->start));
5282 KASSERT(entry->start < entry->end,
5283 ("map %p start = %jx, end = %jx", map,
5284 (uintmax_t)entry->start, (uintmax_t)entry->end));
5285 KASSERT(entry->left == header ||
5286 entry->left->start < entry->start,
5287 ("map %p left->start = %jx, start = %jx", map,
5288 (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5289 KASSERT(entry->right == header ||
5290 entry->start < entry->right->start,
5291 ("map %p start = %jx, right->start = %jx", map,
5292 (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5293 cur = map->root;
5296 if (entry->start < cur->start) {
5298 cur = cur->left;
5300 ("map %p cannot find %jx",
5301 map, (uintmax_t)entry->start));
5302 } else if (cur->end <= entry->start) {
5304 cur = cur->right;
5306 ("map %p cannot find %jx",
5307 map, (uintmax_t)entry->start));
5310 ("map %p cannot find %jx",
5311 map, (uintmax_t)entry->start));
5317 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5318 ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5319 (uintmax_t)entry->max_free,
5323 KASSERT(prev->end <= entry->start,
5324 ("map %p prev->end = %jx, start = %jx", map,
5325 (uintmax_t)prev->end, (uintmax_t)entry->start));
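/*
 * Userland sketch of the cheapest invariants asserted above: entries,
 * walked in address order, must be non-empty, sorted, and disjoint.  (The
 * kernel additionally validates the binary search tree linkage and the
 * cached max_free values; that part is omitted here.)
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct entry_sk {
	uintptr_t	start, end;
};

static void
assert_entries_consistent_sk(const struct entry_sk *e, size_t n)
{
	uintptr_t prev_end = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		assert(e[i].start < e[i].end);		/* non-empty */
		assert(prev_end <= e[i].start);		/* sorted and disjoint */
		prev_end = e[i].end;
	}
}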
5336 vm_map_print(vm_map_t map)
5340 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5341 (void *)map,
5342 (void *)map->pmap, map->nentries, map->timestamp);
5345 prev = &map->header;
5346 VM_MAP_ENTRY_FOREACH(entry, map) {
5347 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
5348 (void *)entry, (void *)entry->start, (void *)entry->end,
5349 entry->eflags);
5355 entry->protection,
5356 entry->max_protection,
5358 entry->inheritance]);
5359 if (entry->wired_count != 0)
5362 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5364 (void *)entry->object.sub_map,
5365 (uintmax_t)entry->offset);
5366 if (prev == &map->header ||
5367 prev->object.sub_map !=
5368 entry->object.sub_map) {
5370 vm_map_print((vm_map_t)entry->object.sub_map);
5371 db_indent -= 2;
5374 if (entry->cred != NULL)
5375 db_printf(", ruid %d", entry->cred->cr_ruid);
5377 (void *)entry->object.vm_object,
5378 (uintmax_t)entry->offset);
5379 if (entry->object.vm_object && entry->object.vm_object->cred)
5381 entry->object.vm_object->cred->cr_ruid,
5382 (uintmax_t)entry->object.vm_object->charge);
5383 if (entry->eflags & MAP_ENTRY_COW)
5385 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5388 if (prev == &map->header ||
5389 prev->object.vm_object !=
5390 entry->object.vm_object) {
5393 entry->object.vm_object,
5395 db_indent -= 2;
5400 db_indent -= 2;
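/*
 * Sketch of the indentation discipline used by the DDB printer above: a
 * shared indent level is raised before recursing into a submap or backing
 * object and restored afterwards, so nested structures print as a tree.
 * Hypothetical userland stand-ins for db_indent and db_iprintf().
 */
#include <stdio.h>

static int indent_sk;

static void
iprintf_sk(const char *msg)
{
	printf("%*s%s\n", indent_sk, "", msg);
}

static void
print_nested_sk(int depth)
{
	iprintf_sk("map entry ...");
	if (depth > 0) {
		indent_sk += 2;			/* mirrors db_indent += 2 */
		print_nested_sk(depth - 1);	/* recurse into the submap */
		indent_sk -= 2;			/* restore on the way out */
	}
}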
5403 DB_SHOW_COMMAND(map, map)
5407 db_printf("usage: show map <addr>\n");
5423 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5424 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5425 (void *)vmspace_pmap(p->p_vmspace));
5427 vm_map_print((vm_map_t)&p->p_vmspace->vm_map);