Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping

1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
62 * Virtual memory mapping module.
100 * Virtual memory maps provide for the mapping, protection,
107 * Maps consist of an ordered doubly-linked list of simple
108 * entries; a self-adjusting binary search tree of these
121 * another, and then marking both regions as copy-on-write.
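As a rough illustration of the structure this header comment describes (each address range kept both on an address-ordered list and in a self-adjusting search tree), a minimal sketch follows; the type and field names are invented for the example and do not match the real struct vm_map_entry.

#include <stddef.h>

/* Illustrative node: lives on an ordered doubly-linked list and in a
 * binary search tree at the same time, as the comment above describes. */
struct demo_entry {
	struct demo_entry *prev, *next;   /* address-ordered list links */
	struct demo_entry *left, *right;  /* search tree links */
	unsigned long      start, end;    /* half-open address range */
};

/* Lookups walk the tree; neighbor traversal walks the list. */
static struct demo_entry *
demo_lookup(struct demo_entry *root, unsigned long addr)
{
	while (root != NULL) {
		if (addr < root->start)
			root = root->left;
		else if (addr >= root->end)
			root = root->right;
		else
			return (root);
	}
	return (NULL);
}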
149 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
150 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
151 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
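The ENTRY_CHARGED() macro above can be read as the following function-style restatement: an entry is charged for swap accounting if it carries a credential itself, or if its backing object is charged and the entry no longer needs a copy-on-write copy. This is only a sketch assuming the types already included by vm_map.c, not a drop-in replacement for the macro.

/* Sketch: the same test as ENTRY_CHARGED(), written out longhand. */
static inline bool
entry_charged(const struct vm_map_entry *e)
{
	return (e->cred != NULL ||
	    (e->object.vm_object != NULL &&
	     e->object.vm_object->cred != NULL &&
	     (e->eflags & MAP_ENTRY_NEEDS_COPY) == 0));
}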
238 * The worst-case upper bound on the number of kernel map entries that may be
262 * Disable the use of per-CPU buckets: map entry allocation is in vm_map_startup()
294 map = &vm->vm_map; in vmspace_zinit()
297 sx_init(&map->lock, "vm map (user)"); in vmspace_zinit()
309 KASSERT(vm->vm_map.nentries == 0, in vmspace_zdtor()
310 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); in vmspace_zdtor()
311 KASSERT(vm->vm_map.size == 0, in vmspace_zdtor()
312 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); in vmspace_zdtor()
326 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); in vmspace_alloc()
332 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); in vmspace_alloc()
333 refcount_init(&vm->vm_refcnt, 1); in vmspace_alloc()
334 vm->vm_shm = NULL; in vmspace_alloc()
335 vm->vm_swrss = 0; in vmspace_alloc()
336 vm->vm_tsize = 0; in vmspace_alloc()
337 vm->vm_dsize = 0; in vmspace_alloc()
338 vm->vm_ssize = 0; in vmspace_alloc()
339 vm->vm_taddr = 0; in vmspace_alloc()
340 vm->vm_daddr = 0; in vmspace_alloc()
341 vm->vm_maxsaddr = 0; in vmspace_alloc()
377 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), in vmspace_dofree()
378 vm_map_max(&vm->vm_map)); in vmspace_dofree()
381 vm->vm_map.pmap = NULL; in vmspace_dofree()
392 if (refcount_release(&vm->vm_refcnt)) in vmspace_free()
402 vm = p->p_vmspace; in vmspace_exitfree()
403 p->p_vmspace = NULL; in vmspace_exitfree()
416 p = td->td_proc; in vmspace_exit()
417 vm = p->p_vmspace; in vmspace_exit()
427 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { in vmspace_exit()
428 if (p->p_vmspace != &vmspace0) { in vmspace_exit()
430 p->p_vmspace = &vmspace0; in vmspace_exit()
434 released = refcount_release(&vm->vm_refcnt); in vmspace_exit()
441 if (p->p_vmspace != vm) { in vmspace_exit()
443 p->p_vmspace = vm; in vmspace_exit()
449 p->p_vmspace = &vmspace0; in vmspace_exit()
468 vm = p->p_vmspace; in vmspace_acquire_ref()
469 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) { in vmspace_acquire_ref()
473 if (vm != p->p_vmspace) { in vmspace_acquire_ref()
492 * a result, the 'newvm' vmspace always has a non-zero reference
504 KASSERT(refcount_load(&newvm->vm_refcnt) > 0, in vmspace_switch_aio()
507 oldvm = curproc->p_vmspace; in vmspace_switch_aio()
514 curproc->p_vmspace = newvm; in vmspace_switch_aio()
515 refcount_acquire(&newvm->vm_refcnt); in vmspace_switch_aio()
517 /* Activate the new mapping. */ in vmspace_switch_aio()
528 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock()
530 sx_xlock_(&map->lock, file, line); in _vm_map_lock()
531 map->timestamp++; in _vm_map_lock()
541 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0) in vm_map_entry_set_vnode_text()
543 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_set_vnode_text()
545 object = entry->object.vm_object; in vm_map_entry_set_vnode_text()
547 if ((object->flags & OBJ_ANON) != 0) in vm_map_entry_set_vnode_text()
548 object = object->handle; in vm_map_entry_set_vnode_text()
550 KASSERT(object->backing_object == NULL, in vm_map_entry_set_vnode_text()
551 ("non-anon object %p shadows", object)); in vm_map_entry_set_vnode_text()
553 entry, entry->object.vm_object)); in vm_map_entry_set_vnode_text()
588 entry = td->td_map_def_user; in vm_map_process_deferred()
589 td->td_map_def_user = NULL; in vm_map_process_deferred()
591 next = entry->defer_next; in vm_map_process_deferred()
592 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | in vm_map_process_deferred()
595 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) { in vm_map_process_deferred()
600 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_process_deferred()
602 object = entry->object.vm_object; in vm_map_process_deferred()
604 vm_pager_release_writecount(object, entry->start, in vm_map_process_deferred()
605 entry->end); in vm_map_process_deferred()
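vm_map_process_deferred() above drains the current thread's td_map_def_user list: map entries queued there (see lines 3993-3994 below) have their cleanup postponed until the map lock has been dropped. A loose userland sketch of that deferral pattern, with invented names rather than the kernel's machinery:

#include <stddef.h>

struct deferred {
	struct deferred *next;
	void           (*fn)(void *);
	void            *arg;
};

static __thread struct deferred *deferred_head;

/* Queue work while the lock is still held... */
static void
defer(struct deferred *d)
{
	d->next = deferred_head;
	deferred_head = d;
}

/* ...and run it only after the lock has been released. */
static void
process_deferred(void)
{
	struct deferred *d, *next;

	d = deferred_head;
	deferred_head = NULL;
	for (; d != NULL; d = next) {
		next = d->next;
		d->fn(d->arg);
	}
}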
619 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_assert_locked()
621 sx_assert_(&map->lock, SA_XLOCKED, file, line); in _vm_map_assert_locked()
642 if (map->nupdates > map->nentries) { \
644 map->nupdates = 0; \
663 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) { in _vm_map_unlock()
665 map->flags &= ~MAP_REPLENISH; in _vm_map_unlock()
668 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock()
670 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock()
680 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock_read()
682 sx_slock_(&map->lock, file, line); in _vm_map_lock_read()
690 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_read()
692 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_read()
694 sx_sunlock_(&map->lock, file, line); in _vm_map_unlock_read()
705 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock()
706 !sx_try_xlock_(&map->lock, file, line); in _vm_map_trylock()
708 map->timestamp++; in _vm_map_trylock()
718 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock_read()
719 !sx_try_slock_(&map->lock, file, line); in _vm_map_trylock_read()
728 * non-zero value if the upgrade fails. If the upgrade fails, the map is
739 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_upgrade()
741 if (!sx_try_upgrade_(&map->lock, file, line)) { in _vm_map_lock_upgrade()
742 last_timestamp = map->timestamp; in _vm_map_lock_upgrade()
743 sx_sunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
749 sx_xlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
750 if (last_timestamp != map->timestamp) { in _vm_map_lock_upgrade()
751 sx_xunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
756 map->timestamp++; in _vm_map_lock_upgrade()
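When the in-place upgrade attempt fails, _vm_map_lock_upgrade() above records the map timestamp, drops the shared lock, reacquires it exclusively, and reports failure if the timestamp moved while the map was unlocked. A hedged userland sketch of the same validate-by-generation idea, assuming a pthread rwlock and a counter that every writer bumps (names are invented for the example):

#include <pthread.h>
#include <stdbool.h>

struct guarded {
	pthread_rwlock_t lock;
	unsigned long    timestamp;   /* incremented under the write lock */
};

/* Returns true with the write lock held, or false with the lock dropped
 * when the protected state changed while it was briefly unlocked. */
static bool
upgrade_lock(struct guarded *g)
{
	unsigned long saved = g->timestamp;

	pthread_rwlock_unlock(&g->lock);   /* release the shared lock */
	pthread_rwlock_wrlock(&g->lock);   /* take it exclusively */
	if (saved != g->timestamp) {
		pthread_rwlock_unlock(&g->lock);
		return (false);            /* caller must redo its lookup */
	}
	g->timestamp++;
	return (true);
}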
765 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_lock_downgrade()
767 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_downgrade()
770 sx_downgrade_(&map->lock, file, line); in _vm_map_lock_downgrade()
777 * Returns a non-zero value if the caller holds a write (exclusive) lock
785 return (mtx_owned(&map->system_mtx)); in vm_map_locked()
786 return (sx_xlocked(&map->lock)); in vm_map_locked()
810 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_and_wait()
812 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_and_wait()
814 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock_and_wait()
816 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", in _vm_map_unlock_and_wait()
837 wakeup(&map->root); in vm_map_wakeup()
845 map->busy++; in vm_map_busy()
853 KASSERT(map->busy, ("vm_map_unbusy: not busy")); in vm_map_unbusy()
854 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { in vm_map_unbusy()
856 wakeup(&map->busy); in vm_map_unbusy()
865 while (map->busy) { in vm_map_wait_busy()
868 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); in vm_map_wait_busy()
870 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); in vm_map_wait_busy()
872 map->timestamp++; in vm_map_wait_busy()
889 map->header.eflags = MAP_ENTRY_HEADER; in _vm_map_init()
890 map->pmap = pmap; in _vm_map_init()
891 map->header.end = min; in _vm_map_init()
892 map->header.start = max; in _vm_map_init()
893 map->flags = 0; in _vm_map_init()
894 map->header.left = map->header.right = &map->header; in _vm_map_init()
895 map->root = NULL; in _vm_map_init()
896 map->timestamp = 0; in _vm_map_init()
897 map->busy = 0; in _vm_map_init()
898 map->anon_loc = 0; in _vm_map_init()
900 map->nupdates = 0; in _vm_map_init()
908 sx_init(&map->lock, "vm map (user)"); in vm_map_init()
916 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | in vm_map_init_system()
958 kernel_map->flags |= MAP_REPLENISH; in vm_map_entry_create()
981 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | in vm_map_entry_set_behavior()
996 return (root->left != left_ancestor ? in vm_map_entry_max_free_left()
997 root->left->max_free : root->start - left_ancestor->end); in vm_map_entry_max_free_left()
1004 return (root->right != right_ancestor ? in vm_map_entry_max_free_right()
1005 root->right->max_free : right_ancestor->start - root->end); in vm_map_entry_max_free_right()
1020 prior = entry->left; in vm_map_entry_pred()
1021 if (prior->right->start < entry->start) { in vm_map_entry_pred()
1023 prior = prior->right; in vm_map_entry_pred()
1024 while (prior->right != entry); in vm_map_entry_pred()
1041 * Infer root->right->max_free == root->max_free when \
1042 * y->max_free < root->max_free || root->max_free == 0. \
1045 y = root->left; \
1046 max_free = root->max_free; \
1051 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \
1055 z = y->right; \
1057 root->left = z; \
1058 y->right = root; \
1059 if (max_free < y->max_free) \
1060 root->max_free = max_free = \
1061 vm_size_max(max_free, z->max_free); \
1062 } else if (max_free < y->max_free) \
1063 root->max_free = max_free = \
1064 vm_size_max(max_free, root->start - y->end);\
1066 y = root->left; \
1068 /* Copy right->max_free. Put root on rlist. */ \
1069 root->max_free = max_free; \
1072 root->left = rlist; \
1082 * Infer root->left->max_free == root->max_free when \
1083 * y->max_free < root->max_free || root->max_free == 0. \
1086 y = root->right; \
1087 max_free = root->max_free; \
1092 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \
1096 z = y->left; \
1098 root->right = z; \
1099 y->left = root; \
1100 if (max_free < y->max_free) \
1101 root->max_free = max_free = \
1102 vm_size_max(max_free, z->max_free); \
1103 } else if (max_free < y->max_free) \
1104 root->max_free = max_free = \
1105 vm_size_max(max_free, y->start - root->end);\
1107 y = root->right; \
1109 /* Copy left->max_free. Put root on llist. */ \
1110 root->max_free = max_free; \
1113 root->right = llist; \
1121 * subtrees with root->max_free < length as empty trees. llist and rlist are
1122 * the two sides in reverse order (bottom-up), with llist linked by the right
1124 * lists terminated by &map->header. This function, and the subsequent call to
1126 * values in &map->header.
1134 left = right = &map->header; in vm_map_splay_split()
1135 root = map->root; in vm_map_splay_split()
1136 while (root != NULL && root->max_free >= length) { in vm_map_splay_split()
1137 KASSERT(left->end <= root->start && in vm_map_splay_split()
1138 root->end <= right->start, in vm_map_splay_split()
1140 if (addr < root->start) { in vm_map_splay_split()
1142 y->max_free >= length && addr < y->start); in vm_map_splay_split()
1143 } else if (addr >= root->end) { in vm_map_splay_split()
1145 y->max_free >= length && addr >= y->end); in vm_map_splay_split()
1160 hi = root->right == right ? NULL : root->right; in vm_map_splay_findnext()
1175 lo = root->left == left ? NULL : root->left; in vm_map_splay_findprev()
1205 * llist->max_free and max_free. Update with the in vm_map_splay_merge_left_walk()
1208 llist->max_free = max_free = in vm_map_splay_merge_left_walk()
1209 vm_size_max(llist->max_free, max_free); in vm_map_splay_merge_left_walk()
1210 vm_map_entry_swap(&llist->right, &tail); in vm_map_splay_merge_left_walk()
1213 root->left = tail; in vm_map_splay_merge_left_walk()
1226 max_free = root->start - llist->end; in vm_map_splay_merge_pred()
1231 root->left = header; in vm_map_splay_merge_pred()
1232 header->right = root; in vm_map_splay_merge_pred()
1249 root->left == llist ? root : root->left, in vm_map_splay_merge_left()
1262 * rlist->max_free and max_free. Update with the in vm_map_splay_merge_right_walk()
1265 rlist->max_free = max_free = in vm_map_splay_merge_right_walk()
1266 vm_size_max(rlist->max_free, max_free); in vm_map_splay_merge_right_walk()
1267 vm_map_entry_swap(&rlist->left, &tail); in vm_map_splay_merge_right_walk()
1270 root->right = tail; in vm_map_splay_merge_right_walk()
1283 max_free = rlist->start - root->end; in vm_map_splay_merge_succ()
1288 root->right = header; in vm_map_splay_merge_succ()
1289 header->left = root; in vm_map_splay_merge_succ()
1306 root->right == rlist ? root : root->right, in vm_map_splay_merge_right()
1315 * The Sleator and Tarjan top-down splay algorithm with the
1316 * following variation. Max_free must be computed bottom-up, so
1343 header = &map->header; in vm_map_splay()
1354 llist = root->right; in vm_map_splay()
1363 rlist = root->left; in vm_map_splay()
1370 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_splay()
1371 map->root = root; in vm_map_splay()
1393 map->nentries, entry); in vm_map_entry_link()
1395 map->nentries++; in vm_map_entry_link()
1396 header = &map->header; in vm_map_entry_link()
1397 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_link()
1405 } else if (entry->start == root->start) { in vm_map_entry_link()
1412 KASSERT(entry->end < root->end, in vm_map_entry_link()
1415 if ((root->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_entry_link()
1416 root->offset += entry->end - root->start; in vm_map_entry_link()
1417 root->start = entry->end; in vm_map_entry_link()
1419 max_free_right = root->max_free = vm_size_max( in vm_map_entry_link()
1429 KASSERT(entry->end == root->end, in vm_map_entry_link()
1432 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_entry_link()
1433 entry->offset += entry->start - root->start; in vm_map_entry_link()
1434 root->end = entry->start; in vm_map_entry_link()
1435 max_free_left = root->max_free = vm_size_max( in vm_map_entry_link()
1440 entry->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_link()
1441 map->root = entry; in vm_map_entry_link()
1458 header = &map->header; in vm_map_entry_unlink()
1459 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_unlink()
1466 rlist->start = root->start; in vm_map_entry_unlink()
1467 MPASS((rlist->eflags & MAP_ENTRY_STACK_GAP) == 0); in vm_map_entry_unlink()
1468 rlist->offset = root->offset; in vm_map_entry_unlink()
1472 llist = root->right; in vm_map_entry_unlink()
1477 rlist = root->left; in vm_map_entry_unlink()
1481 header->left = header->right = header; in vm_map_entry_unlink()
1485 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_unlink()
1486 map->root = root; in vm_map_entry_unlink()
1488 map->nentries--; in vm_map_entry_unlink()
1490 map->nentries, entry); in vm_map_entry_unlink()
1507 header = &map->header; in vm_map_entry_resize()
1508 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_resize()
1511 entry->end += grow_amount; in vm_map_entry_resize()
1512 root->max_free = vm_size_max( in vm_map_entry_resize()
1515 map->root = root; in vm_map_entry_resize()
1518 __func__, map, map->nentries, entry); in vm_map_entry_resize()
1544 header = &map->header; in vm_map_lookup_entry()
1545 cur = map->root; in vm_map_lookup_entry()
1550 if (address >= cur->start && cur->end > address) { in vm_map_lookup_entry()
1555 sx_try_upgrade(&map->lock)) { in vm_map_lookup_entry()
1565 sx_downgrade(&map->lock); in vm_map_lookup_entry()
1573 if (address < cur->start) { in vm_map_lookup_entry()
1578 return (address < cur->end); in vm_map_lookup_entry()
1586 if (address < cur->start) { in vm_map_lookup_entry()
1588 cur = cur->left; in vm_map_lookup_entry()
1591 } else if (cur->end <= address) { in vm_map_lookup_entry()
1593 cur = cur->right; in vm_map_lookup_entry()
1641 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE | in vm_map_insert1()
1656 if (next_entry->start < end) in vm_map_insert1()
1692 bdry = pagesizes[bidx] - 1; in vm_map_insert1()
1703 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) in vm_map_insert1()
1707 object->cred == NULL, in vm_map_insert1()
1709 cred = curthread->td_ucred; in vm_map_insert1()
1723 * OBJ_ONEMAPPING must be cleared unless this mapping in vm_map_insert1()
1724 * is trivially proven to be the only mapping for any in vm_map_insert1()
1729 if ((object->flags & OBJ_ANON) != 0) { in vm_map_insert1()
1731 if (object->ref_count > 1 || object->shadow_count != 0) in vm_map_insert1()
1735 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1738 prev_entry->end == start && (prev_entry->cred == cred || in vm_map_insert1()
1739 (prev_entry->object.vm_object != NULL && in vm_map_insert1()
1740 prev_entry->object.vm_object->cred == cred)) && in vm_map_insert1()
1741 vm_object_coalesce(prev_entry->object.vm_object, in vm_map_insert1()
1742 prev_entry->offset, in vm_map_insert1()
1743 (vm_size_t)(prev_entry->end - prev_entry->start), in vm_map_insert1()
1744 (vm_size_t)(end - prev_entry->end), cred != NULL && in vm_map_insert1()
1751 if (prev_entry->inheritance == inheritance && in vm_map_insert1()
1752 prev_entry->protection == prot && in vm_map_insert1()
1753 prev_entry->max_protection == max && in vm_map_insert1()
1754 prev_entry->wired_count == 0) { in vm_map_insert1()
1755 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1758 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1759 map->size += end - prev_entry->end; in vm_map_insert1()
1761 end - prev_entry->end); in vm_map_insert1()
1773 object = prev_entry->object.vm_object; in vm_map_insert1()
1774 offset = prev_entry->offset + in vm_map_insert1()
1775 (prev_entry->end - prev_entry->start); in vm_map_insert1()
1777 if (cred != NULL && object != NULL && object->cred != NULL && in vm_map_insert1()
1778 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_insert1()
1790 new_entry->start = start; in vm_map_insert1()
1791 new_entry->end = end; in vm_map_insert1()
1792 new_entry->cred = NULL; in vm_map_insert1()
1794 new_entry->eflags = protoeflags; in vm_map_insert1()
1795 new_entry->object.vm_object = object; in vm_map_insert1()
1796 new_entry->offset = offset; in vm_map_insert1()
1798 new_entry->inheritance = inheritance; in vm_map_insert1()
1799 new_entry->protection = prot; in vm_map_insert1()
1800 new_entry->max_protection = max; in vm_map_insert1()
1801 new_entry->wired_count = 0; in vm_map_insert1()
1802 new_entry->wiring_thread = NULL; in vm_map_insert1()
1803 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; in vm_map_insert1()
1804 new_entry->next_read = start; in vm_map_insert1()
1808 new_entry->cred = cred; in vm_map_insert1()
1814 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1815 map->size += new_entry->end - new_entry->start; in vm_map_insert1()
1828 end - start, cow & MAP_PREFAULT_PARTIAL); in vm_map_insert1()
1842 * If object is non-NULL, ref count must be bumped by caller
1870 * vm_map_max(map)-length+1 if insufficient space.
1886 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) in vm_map_findspace()
1887 return (vm_map_max(map) - length + 1); in vm_map_findspace()
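The test at line 1886 above is written to avoid integer wrap-around: rather than comparing start + length against the map maximum, it first rules out start >= vm_map_max(map) and then compares length against vm_map_max(map) - start. A standalone sketch of the same overflow-safe check, with invented names:

#include <stdbool.h>
#include <stdint.h>

/* True when start + length <= map_max, evaluated without wrap-around. */
static bool
range_fits(uintptr_t start, uintptr_t length, uintptr_t map_max)
{
	return (start < map_max && length <= map_max - start);
}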
1890 if (map->root == NULL) in vm_map_findspace()
 1897 	 * enough; otherwise set gap_end to start, skip gap-checking, and move in vm_map_findspace()
1900 header = &map->header; in vm_map_findspace()
1902 gap_end = rlist->start; in vm_map_findspace()
1904 start = root->end; in vm_map_findspace()
1905 if (root->right != rlist) in vm_map_findspace()
1911 rlist = root->left; in vm_map_findspace()
1916 llist = root->right; in vm_map_findspace()
1920 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_findspace()
1921 map->root = root; in vm_map_findspace()
1923 if (length <= gap_end - start) in vm_map_findspace()
1927 if (root->right == header || length > root->right->max_free) in vm_map_findspace()
1928 return (vm_map_max(map) - length + 1); in vm_map_findspace()
1931 * Splay for the least large-enough gap in the right subtree. in vm_map_findspace()
1946 llist = root->right; in vm_map_findspace()
1949 root->max_free = vm_size_max(max_free_left, in vm_map_findspace()
1953 rlist = y->left; in vm_map_findspace()
1954 y->max_free = vm_size_max( in vm_map_findspace()
1957 root->max_free = vm_size_max(max_free_left, y->max_free); in vm_map_findspace()
1959 map->root = root; in vm_map_findspace()
1961 return (root->end); in vm_map_findspace()
1974 ("vm_map_fixed: non-NULL backing object for stack")); in vm_map_fixed()
2031 * specified alignment. Performs an address-ordered, first-fit search from
2110 * first-fit from the specified address; the region found is
2113 * If object is non-NULL, ref count must be bumped by caller
2142 ("non-NULL backing object for stack")); in vm_map_find_locked()
2146 (object->flags & OBJ_COLORED) == 0)) in vm_map_find_locked()
2153 en_aslr = (map->flags & MAP_ASLR) != 0; in vm_map_find_locked()
2155 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && in vm_map_find_locked()
2162 (map->flags & MAP_ASLR_IGNSTART) != 0) in vm_map_find_locked()
2166 curr_min_addr = map->anon_loc; in vm_map_find_locked()
2177 * When creating an anonymous mapping, try clustering in vm_map_find_locked()
2178 * with an existing anonymous mapping first. in vm_map_find_locked()
2183 * anonymous mapping. If this first attempt fails, in vm_map_find_locked()
2184 * perform a first-fit search of the available address in vm_map_find_locked()
2199 * mapping. Retry with free run. in vm_map_find_locked()
2201 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? in vm_map_find_locked()
2220 * mapping's alignment if that mapping is at in vm_map_find_locked()
2225 pidx--; in vm_map_find_locked()
2283 * placed an anonymous memory mapping at a lower address. in vm_map_find_locked()
2285 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || in vm_map_find_locked()
2286 *addr < map->anon_loc)) in vm_map_find_locked()
2287 map->anon_loc = *addr; in vm_map_find_locked()
2295 * and not as the minimum address where the mapping is created.
2338 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || in vm_map_mergeable_neighbors()
2339 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, in vm_map_mergeable_neighbors()
2342 return (prev->end == entry->start && in vm_map_mergeable_neighbors()
2343 prev->object.vm_object == entry->object.vm_object && in vm_map_mergeable_neighbors()
2344 (prev->object.vm_object == NULL || in vm_map_mergeable_neighbors()
2345 prev->offset + (prev->end - prev->start) == entry->offset) && in vm_map_mergeable_neighbors()
2346 prev->eflags == entry->eflags && in vm_map_mergeable_neighbors()
2347 prev->protection == entry->protection && in vm_map_mergeable_neighbors()
2348 prev->max_protection == entry->max_protection && in vm_map_mergeable_neighbors()
2349 prev->inheritance == entry->inheritance && in vm_map_mergeable_neighbors()
2350 prev->wired_count == entry->wired_count && in vm_map_mergeable_neighbors()
2351 prev->cred == entry->cred); in vm_map_mergeable_neighbors()
2362 * kept without causing a lock-order reversal with the vnode lock. in vm_map_merged_neighbor_dispose()
2365 * object->un_pager.vnp.writemappings, the writemappings value in vm_map_merged_neighbor_dispose()
2368 if (entry->object.vm_object != NULL) in vm_map_merged_neighbor_dispose()
2369 vm_object_deallocate(entry->object.vm_object); in vm_map_merged_neighbor_dispose()
2370 if (entry->cred != NULL) in vm_map_merged_neighbor_dispose()
2371 crfree(entry->cred); in vm_map_merged_neighbor_dispose()
2391 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && in vm_map_try_merge_entries()
2410 KASSERT(entry->object.vm_object == NULL, in vm_map_entry_back()
2412 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_back()
2414 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, in vm_map_entry_back()
2415 entry->cred, entry->end - entry->start); in vm_map_entry_back()
2416 entry->object.vm_object = object; in vm_map_entry_back()
2417 entry->offset = 0; in vm_map_entry_back()
2418 entry->cred = NULL; in vm_map_entry_back()
2432 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_charge_object()
2434 if (entry->object.vm_object == NULL && !vm_map_is_system(map) && in vm_map_entry_charge_object()
2435 (entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_entry_charge_object()
2437 else if (entry->object.vm_object != NULL && in vm_map_entry_charge_object()
2438 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && in vm_map_entry_charge_object()
2439 entry->cred != NULL) { in vm_map_entry_charge_object()
2440 VM_OBJECT_WLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2441 KASSERT(entry->object.vm_object->cred == NULL, in vm_map_entry_charge_object()
2443 entry->object.vm_object->cred = entry->cred; in vm_map_entry_charge_object()
2444 entry->object.vm_object->charge = entry->end - entry->start; in vm_map_entry_charge_object()
2445 VM_OBJECT_WUNLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2446 entry->cred = NULL; in vm_map_entry_charge_object()
2471 if (new_entry->cred != NULL) in vm_map_entry_clone()
2472 crhold(entry->cred); in vm_map_entry_clone()
2473 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { in vm_map_entry_clone()
2474 vm_object_reference(new_entry->object.vm_object); in vm_map_entry_clone()
2477 * The object->un_pager.vnp.writemappings for the object of in vm_map_entry_clone()
2479 * virtual pages are re-distributed among the clipped entries, in vm_map_entry_clone()
2504 if (startaddr <= entry->start) in vm_map_clip_start()
2508 KASSERT(entry->end > startaddr && entry->start < startaddr, in vm_map_clip_start()
2513 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_start()
2523 new_entry->end = startaddr; in vm_map_clip_start()
2577 if (endaddr >= entry->end) in vm_map_clip_end()
2581 KASSERT(entry->start < endaddr && entry->end > endaddr, in vm_map_clip_end()
2586 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_end()
2596 new_entry->start = endaddr; in vm_map_clip_end()
2633 submap->flags |= MAP_IS_SUB_MAP; in vm_map_submap()
2638 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && in vm_map_submap()
2639 (entry->eflags & MAP_ENTRY_COW) == 0 && in vm_map_submap()
2640 entry->object.vm_object == NULL) { in vm_map_submap()
2647 entry->object.sub_map = submap; in vm_map_submap()
2648 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; in vm_map_submap()
2656 submap->flags &= ~MAP_IS_SUB_MAP; in vm_map_submap()
2671 * object's memory-resident pages. No further physical pages are
2674 * limited number of page mappings are created at the low-end of the
2675 * specified address range. (For this purpose, a superpage mapping
2676 * counts as one page mapping.) Otherwise, all resident pages within
2691 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2693 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2694 pmap_object_init_pt(map->pmap, addr, object, pindex, in vm_map_pmap_enter()
2704 if (psize + pindex > object->size) { in vm_map_pmap_enter()
2705 if (pindex >= object->size) { in vm_map_pmap_enter()
2709 psize = object->size - pindex; in vm_map_pmap_enter()
2723 tmpidx = p->pindex - pindex; in vm_map_pmap_enter()
2737 /* Jump ahead if a superpage mapping is possible. */ in vm_map_pmap_enter()
2738 for (psind = p->psind; psind > 0; psind--) { in vm_map_pmap_enter()
2740 (pagesizes[psind] - 1)) == 0) { in vm_map_pmap_enter()
2741 mask = atop(pagesizes[psind]) - 1; in vm_map_pmap_enter()
2752 pmap_enter_object(map->pmap, start, addr + in vm_map_pmap_enter()
2758 pmap_enter_object(map->pmap, start, addr + ptoa(psize), in vm_map_pmap_enter()
2769 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); in vm_map_protect_guard()
2770 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_protect_guard()
2773 old_prot = PROT_EXTRACT(entry->offset); in vm_map_protect_guard()
2775 entry->offset = PROT_MAX(new_maxprot) | in vm_map_protect_guard()
2779 entry->offset = new_prot | PROT_MAX( in vm_map_protect_guard()
2780 PROT_MAX_EXTRACT(entry->offset)); in vm_map_protect_guard()
2815 if ((map->flags & MAP_WXORX) != 0 && in vm_map_protect()
2836 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { in vm_map_protect()
2844 while (!CONTAINS_BITS(first_entry->eflags, in vm_map_protect()
2848 start = first_entry->start; in vm_map_protect()
2859 for (entry = first_entry; entry->start < end; in vm_map_protect()
2861 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_protect()
2865 if ((entry->eflags & (MAP_ENTRY_GUARD | in vm_map_protect()
2868 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ? in vm_map_protect()
2869 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; in vm_map_protect()
2874 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) in vm_map_protect()
2879 * Postpone the operation until all in-transition map entries have in vm_map_protect()
2880 * stabilized. An in-transition entry might already have its pages in vm_map_protect()
2886 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_protect()
2893 * private (i.e., copy-on-write) mappings that are transitioning from in vm_map_protect()
2894 * read-only to read/write access. If a reservation fails, break out in vm_map_protect()
2903 for (entry = first_entry; entry->start < end; in vm_map_protect()
2912 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || in vm_map_protect()
2914 (entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2917 cred = curthread->td_ucred; in vm_map_protect()
2918 obj = entry->object.vm_object; in vm_map_protect()
2921 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { in vm_map_protect()
2922 if (!swap_reserve(entry->end - entry->start)) { in vm_map_protect()
2924 end = entry->end; in vm_map_protect()
2928 entry->cred = cred; in vm_map_protect()
2933 if ((obj->flags & OBJ_SWAP) == 0) { in vm_map_protect()
2939 * Charge for the whole object allocation now, since in vm_map_protect()
2940 * we cannot distinguish between non-charged and in vm_map_protect()
2941 * charged clipped mapping of the same object later. in vm_map_protect()
2943 KASSERT(obj->charge == 0, in vm_map_protect()
2946 if (!swap_reserve(ptoa(obj->size))) { in vm_map_protect()
2949 end = entry->end; in vm_map_protect()
2954 obj->cred = cred; in vm_map_protect()
2955 obj->charge = ptoa(obj->size); in vm_map_protect()
2965 entry->start < end; in vm_map_protect()
2971 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_protect()
2977 old_prot = entry->protection; in vm_map_protect()
2980 entry->max_protection = new_maxprot; in vm_map_protect()
2981 entry->protection = new_maxprot & old_prot; in vm_map_protect()
2984 entry->protection = new_prot; in vm_map_protect()
2990 * copy-on-write and enable write access in the physical map. in vm_map_protect()
2992 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && in vm_map_protect()
2993 (entry->protection & VM_PROT_WRITE) != 0 && in vm_map_protect()
2999 * about copy-on-write here. in vm_map_protect()
3001 if ((old_prot & ~entry->protection) != 0) { in vm_map_protect()
3002 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ in vm_map_protect()
3004 pmap_protect(map->pmap, entry->start, in vm_map_protect()
3005 entry->end, in vm_map_protect()
3006 entry->protection & MASK(entry)); in vm_map_protect()
3037 * various clipping operations. Otherwise we only need a read-lock in vm_map_madvise()
3083 for (; entry->start < end; prev_entry = entry, in vm_map_madvise()
3085 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_map_madvise()
3108 entry->eflags |= MAP_ENTRY_NOSYNC; in vm_map_madvise()
3111 entry->eflags &= ~MAP_ENTRY_NOSYNC; in vm_map_madvise()
3114 entry->eflags |= MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3117 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3138 for (; entry->start < end; in vm_map_madvise()
3142 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | in vm_map_madvise()
3149 * we hold the VM map read-locked, neither the in vm_map_madvise()
3154 entry->object.vm_object != NULL && in vm_map_madvise()
3155 entry->object.vm_object->backing_object != NULL) in vm_map_madvise()
3158 pstart = OFF_TO_IDX(entry->offset); in vm_map_madvise()
3159 pend = pstart + atop(entry->end - entry->start); in vm_map_madvise()
3160 useStart = entry->start; in vm_map_madvise()
3161 useEnd = entry->end; in vm_map_madvise()
3163 if (entry->start < start) { in vm_map_madvise()
3164 pstart += atop(start - entry->start); in vm_map_madvise()
3167 if (entry->end > end) { in vm_map_madvise()
3168 pend -= atop(entry->end - end); in vm_map_madvise()
3186 pmap_advise(map->pmap, useStart, useEnd, in vm_map_madvise()
3189 vm_object_madvise(entry->object.vm_object, pstart, in vm_map_madvise()
3193 * Pre-populate paging structures in the in vm_map_madvise()
3198 entry->wired_count == 0) { in vm_map_madvise()
3201 entry->protection, in vm_map_madvise()
3202 entry->object.vm_object, in vm_map_madvise()
3204 ptoa(pend - pstart), in vm_map_madvise()
3245 if (vm_map_lookup_entry(map, end - 1, &lentry)) { in vm_map_inherit()
3251 for (entry = start_entry; entry->start < end; in vm_map_inherit()
3253 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) in vm_map_inherit()
3260 for (entry = start_entry; entry->start < end; prev_entry = entry, in vm_map_inherit()
3262 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", in vm_map_inherit()
3263 entry, (uintmax_t)entry->end, (uintmax_t)end)); in vm_map_inherit()
3264 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || in vm_map_inherit()
3266 entry->inheritance = new_inheritance; in vm_map_inherit()
3280 * another held the lock, lookup a possibly-changed entry at or after the
3292 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_entry_in_transition()
3293 ("not in-tranition map entry %p", in_entry)); in vm_map_entry_in_transition()
3297 start = MAX(in_start, in_entry->start); in vm_map_entry_in_transition()
3298 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_entry_in_transition()
3299 last_timestamp = map->timestamp; in vm_map_entry_in_transition()
3306 if (last_timestamp + 1 == map->timestamp) in vm_map_entry_in_transition()
3352 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_unwire()
3353 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_unwire()
3382 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_unwire()
3383 entry->wiring_thread == NULL, in vm_map_unwire()
3385 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3386 entry->wiring_thread = curthread; in vm_map_unwire()
3393 entry->end < end && next_entry->start > entry->end) { in vm_map_unwire()
3394 end = entry->end; in vm_map_unwire()
3403 end = entry->end; in vm_map_unwire()
3418 for (; entry->start < end; in vm_map_unwire()
3425 * could be simultaneously wiring this new mapping in vm_map_unwire()
3429 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_unwire()
3430 entry->wiring_thread != curthread) { in vm_map_unwire()
3437 (entry->eflags & MAP_ENTRY_USER_WIRED))) { in vm_map_unwire()
3438 if (entry->wired_count == 1) in vm_map_unwire()
3441 entry->wired_count--; in vm_map_unwire()
3443 entry->eflags &= ~MAP_ENTRY_USER_WIRED; in vm_map_unwire()
3445 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_unwire()
3446 ("vm_map_unwire: in-transition flag missing %p", entry)); in vm_map_unwire()
3447 KASSERT(entry->wiring_thread == curthread, in vm_map_unwire()
3449 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3450 entry->wiring_thread = NULL; in vm_map_unwire()
3451 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_unwire()
3452 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_unwire()
3499 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && in vm_map_wire_entry_failure()
3500 entry->wired_count == 1, in vm_map_wire_entry_failure()
3502 KASSERT(failed_addr < entry->end, in vm_map_wire_entry_failure()
3509 if (failed_addr > entry->start) { in vm_map_wire_entry_failure()
3510 pmap_unwire(map->pmap, entry->start, failed_addr); in vm_map_wire_entry_failure()
3511 vm_object_unwire(entry->object.vm_object, entry->offset, in vm_map_wire_entry_failure()
3512 failed_addr - entry->start, PQ_ACTIVE); in vm_map_wire_entry_failure()
3516 * Assign an out-of-range value to represent the failure to wire this in vm_map_wire_entry_failure()
3519 entry->wired_count = -1; in vm_map_wire_entry_failure()
3566 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_wire_locked()
3567 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_wire_locked()
3594 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_wire_locked()
3595 entry->wiring_thread == NULL, in vm_map_wire_locked()
3597 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_wire_locked()
3598 entry->wiring_thread = curthread; in vm_map_wire_locked()
3599 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 in vm_map_wire_locked()
3600 || (entry->protection & prot) != prot) { in vm_map_wire_locked()
3601 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; in vm_map_wire_locked()
3603 end = entry->end; in vm_map_wire_locked()
3607 } else if (entry->wired_count == 0) { in vm_map_wire_locked()
3608 entry->wired_count++; in vm_map_wire_locked()
3610 npages = atop(entry->end - entry->start); in vm_map_wire_locked()
3613 entry->start); in vm_map_wire_locked()
3614 end = entry->end; in vm_map_wire_locked()
3620 * Release the map lock, relying on the in-transition in vm_map_wire_locked()
3623 saved_start = entry->start; in vm_map_wire_locked()
3624 saved_end = entry->end; in vm_map_wire_locked()
3625 last_timestamp = map->timestamp; in vm_map_wire_locked()
3644 if (last_timestamp + 1 != map->timestamp) { in vm_map_wire_locked()
3657 for (entry = next_entry; entry->end < saved_end; in vm_map_wire_locked()
3666 faddr < entry->end) in vm_map_wire_locked()
3675 end = entry->end; in vm_map_wire_locked()
3679 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3680 entry->wired_count++; in vm_map_wire_locked()
3688 entry->end < end && next_entry->start > entry->end) { in vm_map_wire_locked()
3689 end = entry->end; in vm_map_wire_locked()
3706 for (; entry->start < end; in vm_map_wire_locked()
3714 * wiring this new mapping entry. Detect these cases in vm_map_wire_locked()
3721 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_wire_locked()
3722 entry->wiring_thread != curthread) { in vm_map_wire_locked()
3728 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { in vm_map_wire_locked()
3732 entry->eflags |= MAP_ENTRY_USER_WIRED; in vm_map_wire_locked()
3733 } else if (entry->wired_count == -1) { in vm_map_wire_locked()
3738 entry->wired_count = 0; in vm_map_wire_locked()
3740 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3745 if (entry->wired_count == 1) { in vm_map_wire_locked()
3749 atop(entry->end - entry->start)); in vm_map_wire_locked()
3751 entry->wired_count--; in vm_map_wire_locked()
3753 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_wire_locked()
3754 ("vm_map_wire: in-transition flag missing %p", entry)); in vm_map_wire_locked()
3755 KASSERT(entry->wiring_thread == curthread, in vm_map_wire_locked()
3757 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | in vm_map_wire_locked()
3759 entry->wiring_thread = NULL; in vm_map_wire_locked()
3760 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_wire_locked()
3761 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_wire_locked()
3784 * flushing the current region containing start.
3810 start = first_entry->start; in vm_map_sync()
3811 end = first_entry->end; in vm_map_sync()
3815 * Make a first pass to check for user-wired memory, holes, in vm_map_sync()
3818 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_sync()
3820 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { in vm_map_sync()
3826 ((start & (pagesizes[bdry_idx] - 1)) != 0 || in vm_map_sync()
3827 (end & (pagesizes[bdry_idx] - 1)) != 0)) { in vm_map_sync()
3833 if (end > entry->end && in vm_map_sync()
3834 entry->end != next_entry->start) { in vm_map_sync()
3841 pmap_remove(map->pmap, start, end); in vm_map_sync()
3848 for (entry = first_entry; entry->start < end;) { in vm_map_sync()
3849 offset = entry->offset + (start - entry->start); in vm_map_sync()
3850 size = (end <= entry->end ? end : entry->end) - start; in vm_map_sync()
3851 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_sync()
3856 smap = entry->object.sub_map; in vm_map_sync()
3859 tsize = tentry->end - offset; in vm_map_sync()
3862 object = tentry->object.vm_object; in vm_map_sync()
3863 offset = tentry->offset + (offset - tentry->start); in vm_map_sync()
3866 object = entry->object.vm_object; in vm_map_sync()
3869 last_timestamp = map->timestamp; in vm_map_sync()
3876 if (last_timestamp == map->timestamp || in vm_map_sync()
3899 KASSERT(entry->wired_count > 0, in vm_map_entry_unwire()
3902 size = entry->end - entry->start; in vm_map_entry_unwire()
3903 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) in vm_map_entry_unwire()
3905 pmap_unwire(map->pmap, entry->start, entry->end); in vm_map_entry_unwire()
3906 vm_object_unwire(entry->object.vm_object, entry->offset, size, in vm_map_entry_unwire()
3908 entry->wired_count = 0; in vm_map_entry_unwire()
3915 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) in vm_map_entry_deallocate()
3916 vm_object_deallocate(entry->object.vm_object); in vm_map_entry_deallocate()
3933 object = entry->object.vm_object; in vm_map_entry_delete()
3935 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_entry_delete()
3936 MPASS(entry->cred == NULL); in vm_map_entry_delete()
3937 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); in vm_map_entry_delete()
3943 size = entry->end - entry->start; in vm_map_entry_delete()
3944 map->size -= size; in vm_map_entry_delete()
3946 if (entry->cred != NULL) { in vm_map_entry_delete()
3947 swap_release_by_cred(size, entry->cred); in vm_map_entry_delete()
3948 crfree(entry->cred); in vm_map_entry_delete()
3951 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { in vm_map_entry_delete()
3952 entry->object.vm_object = NULL; in vm_map_entry_delete()
3953 } else if ((object->flags & OBJ_ANON) != 0 || in vm_map_entry_delete()
3955 KASSERT(entry->cred == NULL || object->cred == NULL || in vm_map_entry_delete()
3956 (entry->eflags & MAP_ENTRY_NEEDS_COPY), in vm_map_entry_delete()
3958 offidxstart = OFF_TO_IDX(entry->offset); in vm_map_entry_delete()
3961 if (object->ref_count != 1 && in vm_map_entry_delete()
3962 ((object->flags & OBJ_ONEMAPPING) != 0 || in vm_map_entry_delete()
3969 * pmap_remove() on the only mapping to this range in vm_map_entry_delete()
3974 if (offidxend >= object->size && in vm_map_entry_delete()
3975 offidxstart < object->size) { in vm_map_entry_delete()
3976 size1 = object->size; in vm_map_entry_delete()
3977 object->size = offidxstart; in vm_map_entry_delete()
3978 if (object->cred != NULL) { in vm_map_entry_delete()
3979 size1 -= object->size; in vm_map_entry_delete()
3980 KASSERT(object->charge >= ptoa(size1), in vm_map_entry_delete()
3981 ("object %p charge < 0", object)); in vm_map_entry_delete()
3983 object->cred); in vm_map_entry_delete()
3984 object->charge -= ptoa(size1); in vm_map_entry_delete()
3993 entry->defer_next = curthread->td_map_def_user; in vm_map_entry_delete()
3994 curthread->td_map_def_user = entry; in vm_map_entry_delete()
4022 for (; entry->start < end; entry = next_entry) { in vm_map_delete()
4028 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || in vm_map_delete()
4034 saved_start = entry->start; in vm_map_delete()
4035 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_delete()
4036 last_timestamp = map->timestamp; in vm_map_delete()
4039 if (last_timestamp + 1 != map->timestamp) { in vm_map_delete()
4065 if (entry->wired_count != 0) in vm_map_delete()
4073 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in vm_map_delete()
4074 entry->object.vm_object != NULL) in vm_map_delete()
4075 pmap_map_delete(map->pmap, entry->start, entry->end); in vm_map_delete()
4135 if (start < entry->start) in vm_map_check_protection()
4140 if ((entry->protection & protection) != protection) in vm_map_check_protection()
4143 start = entry->end; in vm_map_check_protection()
4153 * Copies a swap-backed object from an existing map entry to a
4154 * new one. Carries forward the swap charge. May change the
4165 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4167 if ((src_object->flags & OBJ_ANON) != 0) { in vm_map_copy_swap_object()
4170 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { in vm_map_copy_swap_object()
4172 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4179 if (src_entry->cred != NULL && in vm_map_copy_swap_object()
4180 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4181 KASSERT(src_object->cred == NULL, in vm_map_copy_swap_object()
4184 src_object->cred = src_entry->cred; in vm_map_copy_swap_object()
4185 src_object->charge = size; in vm_map_copy_swap_object()
4187 dst_entry->object.vm_object = src_object; in vm_map_copy_swap_object()
4189 cred = curthread->td_ucred; in vm_map_copy_swap_object()
4191 dst_entry->cred = cred; in vm_map_copy_swap_object()
4193 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4195 src_entry->cred = cred; in vm_map_copy_swap_object()
4221 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) in vm_map_copy_entry()
4224 if (src_entry->wired_count == 0 || in vm_map_copy_entry()
4225 (src_entry->protection & VM_PROT_WRITE) == 0) { in vm_map_copy_entry()
4228 * write-protected. in vm_map_copy_entry()
4230 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && in vm_map_copy_entry()
4231 (src_entry->protection & VM_PROT_WRITE) != 0) { in vm_map_copy_entry()
4232 pmap_protect(src_map->pmap, in vm_map_copy_entry()
4233 src_entry->start, in vm_map_copy_entry()
4234 src_entry->end, in vm_map_copy_entry()
4235 src_entry->protection & ~VM_PROT_WRITE); in vm_map_copy_entry()
4241 size = src_entry->end - src_entry->start; in vm_map_copy_entry()
4242 if ((src_object = src_entry->object.vm_object) != NULL) { in vm_map_copy_entry()
4243 if ((src_object->flags & OBJ_SWAP) != 0) { in vm_map_copy_entry()
4247 src_object = src_entry->object.vm_object; in vm_map_copy_entry()
4250 dst_entry->object.vm_object = src_object; in vm_map_copy_entry()
4252 src_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4254 dst_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4256 dst_entry->offset = src_entry->offset; in vm_map_copy_entry()
4257 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { in vm_map_copy_entry()
4264 * decrement object->un_pager writecount in vm_map_copy_entry()
4269 fake_entry->eflags = MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4270 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4272 fake_entry->object.vm_object = src_object; in vm_map_copy_entry()
4273 fake_entry->start = src_entry->start; in vm_map_copy_entry()
4274 fake_entry->end = src_entry->end; in vm_map_copy_entry()
4275 fake_entry->defer_next = in vm_map_copy_entry()
4276 curthread->td_map_def_user; in vm_map_copy_entry()
4277 curthread->td_map_def_user = fake_entry; in vm_map_copy_entry()
4280 pmap_copy(dst_map->pmap, src_map->pmap, in vm_map_copy_entry()
4281 dst_entry->start, dst_entry->end - dst_entry->start, in vm_map_copy_entry()
4282 src_entry->start); in vm_map_copy_entry()
4284 dst_entry->object.vm_object = NULL; in vm_map_copy_entry()
4285 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_copy_entry()
4286 dst_entry->offset = 0; in vm_map_copy_entry()
4287 if (src_entry->cred != NULL) { in vm_map_copy_entry()
4288 dst_entry->cred = curthread->td_ucred; in vm_map_copy_entry()
4289 crhold(dst_entry->cred); in vm_map_copy_entry()
4295 * We don't want to make writeable wired pages copy-on-write. in vm_map_copy_entry()
4306 * Update the newly-forked vmspace each time a map entry is inherited
4308 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4317 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) in vmspace_map_entry_forked()
4319 entrysize = entry->end - entry->start; in vmspace_map_entry_forked()
4320 vm2->vm_map.size += entrysize; in vmspace_map_entry_forked()
4321 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { in vmspace_map_entry_forked()
4322 vm2->vm_ssize += btoc(entrysize); in vmspace_map_entry_forked()
4323 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && in vmspace_map_entry_forked()
4324 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { in vmspace_map_entry_forked()
4325 newend = MIN(entry->end, in vmspace_map_entry_forked()
4326 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); in vmspace_map_entry_forked()
4327 vm2->vm_dsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4328 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && in vmspace_map_entry_forked()
4329 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { in vmspace_map_entry_forked()
4330 newend = MIN(entry->end, in vmspace_map_entry_forked()
4331 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); in vmspace_map_entry_forked()
4332 vm2->vm_tsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4357 old_map = &vm1->vm_map; in vmspace_fork()
4364 vm2->vm_taddr = vm1->vm_taddr; in vmspace_fork()
4365 vm2->vm_daddr = vm1->vm_daddr; in vmspace_fork()
4366 vm2->vm_maxsaddr = vm1->vm_maxsaddr; in vmspace_fork()
4367 vm2->vm_stacktop = vm1->vm_stacktop; in vmspace_fork()
4368 vm2->vm_shp_base = vm1->vm_shp_base; in vmspace_fork()
4370 if (old_map->busy) in vmspace_fork()
4372 new_map = &vm2->vm_map; in vmspace_fork()
4376 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); in vmspace_fork()
4378 sx_xunlock(&old_map->lock); in vmspace_fork()
4379 sx_xunlock(&new_map->lock); in vmspace_fork()
4385 new_map->anon_loc = old_map->anon_loc; in vmspace_fork()
4386 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | in vmspace_fork()
4390 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vmspace_fork()
4393 inh = old_entry->inheritance; in vmspace_fork()
4394 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && in vmspace_fork()
4407 object = old_entry->object.vm_object; in vmspace_fork()
4410 object = old_entry->object.vm_object; in vmspace_fork()
4418 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vmspace_fork()
4419 vm_object_shadow(&old_entry->object.vm_object, in vmspace_fork()
4420 &old_entry->offset, in vmspace_fork()
4421 old_entry->end - old_entry->start, in vmspace_fork()
4422 old_entry->cred, in vmspace_fork()
4425 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vmspace_fork()
4426 old_entry->cred = NULL; in vmspace_fork()
4434 object = old_entry->object.vm_object; in vmspace_fork()
4438 if (old_entry->cred != NULL) { in vmspace_fork()
4439 KASSERT(object->cred == NULL, in vmspace_fork()
4441 object->cred = old_entry->cred; in vmspace_fork()
4442 object->charge = old_entry->end - in vmspace_fork()
4443 old_entry->start; in vmspace_fork()
4444 old_entry->cred = NULL; in vmspace_fork()
4453 if (old_entry->eflags & MAP_ENTRY_WRITECNT && in vmspace_fork()
4454 object->type == OBJT_VNODE) { in vmspace_fork()
4455 KASSERT(((struct vnode *)object-> in vmspace_fork()
4456 handle)->v_writecount > 0, in vmspace_fork()
4459 KASSERT(object->un_pager.vnp. in vmspace_fork()
4472 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4474 new_entry->wiring_thread = NULL; in vmspace_fork()
4475 new_entry->wired_count = 0; in vmspace_fork()
4476 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { in vmspace_fork()
4478 new_entry->start, new_entry->end); in vmspace_fork()
4483 * Insert the entry into the new map -- we know we're in vmspace_fork()
4492 pmap_copy(new_map->pmap, old_map->pmap, in vmspace_fork()
4493 new_entry->start, in vmspace_fork()
4494 (old_entry->end - old_entry->start), in vmspace_fork()
4495 old_entry->start); in vmspace_fork()
4507 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4509 new_entry->wiring_thread = NULL; in vmspace_fork()
4510 new_entry->wired_count = 0; in vmspace_fork()
4511 new_entry->object.vm_object = NULL; in vmspace_fork()
4512 new_entry->cred = NULL; in vmspace_fork()
4522 * Create a new anonymous mapping entry modelled from in vmspace_fork()
4528 new_entry->start = old_entry->start; in vmspace_fork()
4529 new_entry->end = old_entry->end; in vmspace_fork()
4530 new_entry->eflags = old_entry->eflags & in vmspace_fork()
4534 new_entry->protection = old_entry->protection; in vmspace_fork()
4535 new_entry->max_protection = old_entry->max_protection; in vmspace_fork()
4536 new_entry->inheritance = VM_INHERIT_ZERO; in vmspace_fork()
4541 new_entry->cred = curthread->td_ucred; in vmspace_fork()
4542 crhold(new_entry->cred); in vmspace_fork()
4543 *fork_charge += (new_entry->end - new_entry->start); in vmspace_fork()
4553 sx_xunlock(&old_map->lock); in vmspace_fork()
4554 sx_xunlock(&new_map->lock); in vmspace_fork()
4572 MPASS((map->flags & MAP_WIREFUTURE) == 0); in vm_map_stack()
4577 /* If we would blow our VMEM resource limit, no go */ in vm_map_stack()
4578 if (map->size + init_ssize > vmemlim) { in vm_map_stack()
4604 ("New mapping is not a stack")); in vm_map_stack_locked()
4609 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_stack_locked()
4610 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_stack_locked()
4617 init_ssize = max_ssize - sgp; in vm_map_stack_locked()
4624 * If we can't accommodate max_ssize in the current mapping, no go. in vm_map_stack_locked()
4626 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) in vm_map_stack_locked()
4637 bot = addrbos + max_ssize - init_ssize; in vm_map_stack_locked()
4645 KASSERT(new_entry->end == top || new_entry->start == bot, in vm_map_stack_locked()
4647 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, in vm_map_stack_locked()
4655 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, in vm_map_stack_locked()
4656 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); in vm_map_stack_locked()
4657 KASSERT((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0, in vm_map_stack_locked()
4659 gap_entry->eflags)); in vm_map_stack_locked()
4663 * read-ahead logic is never used for it. Re-use in vm_map_stack_locked()
4670 gap_entry->next_read = sgp; in vm_map_stack_locked()
4671 gap_entry->offset = prot | PROT_MAX(max); in vm_map_stack_locked()
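For stack gap entries, the excerpts from vm_map_protect_guard(), vm_map_stack_locked(), and vm_map_growstack() above and below reuse the entry's offset field to carry both the current and the maximum protection, combining them with PROT_MAX() and recovering them with PROT_EXTRACT()/PROT_MAX_EXTRACT(). The following sketch shows the packing idea with made-up DEMO_ macros; the shift value is an assumption for the example, and the real encoding lives in the system headers.

#include <assert.h>
#include <sys/mman.h>		/* PROT_READ, PROT_WRITE, PROT_EXEC */

#define DEMO_PROT_BITS		 (PROT_READ | PROT_WRITE | PROT_EXEC)
#define DEMO_PROT_MAX(p)	 ((unsigned long)(p) << 16)	/* illustrative shift */
#define DEMO_PROT_EXTRACT(x)	 ((unsigned)(x) & DEMO_PROT_BITS)
#define DEMO_PROT_MAX_EXTRACT(x) ((unsigned)((x) >> 16) & DEMO_PROT_BITS)

int
main(void)
{
	/* Pack "current = read-only, maximum = read/write" into one word. */
	unsigned long packed = PROT_READ | DEMO_PROT_MAX(PROT_READ | PROT_WRITE);

	assert(DEMO_PROT_EXTRACT(packed) == PROT_READ);
	assert(DEMO_PROT_MAX_EXTRACT(packed) == (PROT_READ | PROT_WRITE));
	return (0);
}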
4695 uint64_t limit; in vm_map_growstack() local
4702 vm = p->p_vmspace; in vm_map_growstack()
4709 if (p != initproc && (map != &p->p_vmspace->vm_map || in vm_map_growstack()
4710 p->p_textvp == NULL)) in vm_map_growstack()
4722 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_growstack()
4724 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0) { in vm_map_growstack()
4726 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || in vm_map_growstack()
4727 stack_entry->start != gap_entry->end) in vm_map_growstack()
4729 grow_amount = round_page(stack_entry->start - addr); in vm_map_growstack()
4733 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_growstack()
4734 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_growstack()
4735 gap_entry->next_read; in vm_map_growstack()
4736 max_grow = gap_entry->end - gap_entry->start; in vm_map_growstack()
4739 max_grow -= guard; in vm_map_growstack()
4745 * limit. in vm_map_growstack()
4747 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && in vm_map_growstack()
4748 addr < (vm_offset_t)vm->vm_stacktop; in vm_map_growstack()
4749 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) in vm_map_growstack()
4756 ctob(vm->vm_ssize) + grow_amount)) { in vm_map_growstack()
4767 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { in vm_map_growstack()
4768 grow_amount = trunc_page((vm_size_t)stacklim) - in vm_map_growstack()
4769 ctob(vm->vm_ssize); in vm_map_growstack()
4774 limit = racct_get_available(p, RACCT_STACK); in vm_map_growstack()
4776 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) in vm_map_growstack()
4777 grow_amount = limit - ctob(vm->vm_ssize); in vm_map_growstack()
4780 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4781 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { in vm_map_growstack()
4789 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { in vm_map_growstack()
4799 /* If we would blow our VMEM resource limit, no go */ in vm_map_growstack()
4800 if (map->size + grow_amount > vmemlim) { in vm_map_growstack()
4807 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { in vm_map_growstack()
4826 prot = PROT_EXTRACT(gap_entry->offset); in vm_map_growstack()
4827 max = PROT_MAX_EXTRACT(gap_entry->offset); in vm_map_growstack()
4828 sgp = gap_entry->next_read; in vm_map_growstack()
4830 grow_start = gap_entry->end - grow_amount; in vm_map_growstack()
4831 if (gap_entry->start + grow_amount == gap_entry->end) { in vm_map_growstack()
4832 gap_start = gap_entry->start; in vm_map_growstack()
4833 gap_end = gap_entry->end; in vm_map_growstack()
4837 MPASS(gap_entry->start < gap_entry->end - grow_amount); in vm_map_growstack()
4838 vm_map_entry_resize(map, gap_entry, -grow_amount); in vm_map_growstack()
4850 gap_entry->next_read = sgp; in vm_map_growstack()
4851 gap_entry->offset = prot | PROT_MAX(max); in vm_map_growstack()
4858 vm->vm_ssize += btoc(grow_amount); in vm_map_growstack()
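Once the growth is accepted, the gap entry is consumed from its top: the new stack pages start at grow_start = gap_entry->end - grow_amount, and the gap is either deleted outright when the growth swallows it entirely or resized by -grow_amount; the next_read/offset metadata shown above is written back when the gap has to be recreated. The committed stack size in vm_ssize is accounted in pages, hence btoc(). A sketch of the interval bookkeeping only, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the gap as a half-open interval [start, end). */
struct gap {
	uint64_t start;
	uint64_t end;
};

/*
 * Consume 'grow' bytes from the top of the gap to hold new stack
 * pages.  Returns the base of the newly usable range and reports
 * whether the gap was consumed entirely, mirroring the
 * delete-vs-resize decision above.  Not a kernel interface.
 */
static uint64_t
consume_gap_top(struct gap *g, uint64_t grow, bool *deleted)
{
	uint64_t grow_start = g->end - grow;

	if (g->start + grow == g->end) {
		*deleted = true;	/* whole gap used up: delete it */
	} else {
		*deleted = false;	/* keep a smaller gap below */
		g->end -= grow;
	}
	return (grow_start);
}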
4863 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4874 error = racct_set(p, RACCT_VMEM, map->size); in vm_map_growstack()
4878 ptoa(pmap_wired_count(map->pmap))); in vm_map_growstack()
4881 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); in vm_map_growstack()
4897 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_exec()
4900 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, in vmspace_exec()
4905 newvmspace->vm_swrss = oldvmspace->vm_swrss; in vmspace_exec()
4914 p->p_vmspace = newvmspace; in vmspace_exec()
4916 if (p == curthread->td_proc) in vmspace_exec()
4918 curthread->td_pflags |= TDP_EXECVMSPC; in vmspace_exec()
4929 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_unshare()
4935 * cannot concurrently transition 1 -> 2. in vmspace_unshare()
4937 if (refcount_load(&oldvmspace->vm_refcnt) == 1) in vmspace_unshare()
4943 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { in vmspace_unshare()
4948 p->p_vmspace = newvmspace; in vmspace_unshare()
4950 if (p == curthread->td_proc) in vmspace_unshare()
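vmspace_unshare() copies the address space only when it is actually shared: a reference count of 1 observed by the owning process cannot race upward, because only this process can hand out new references to its own vmspace (the "cannot concurrently transition 1 -> 2" remark), and the copy is charged to the process credentials via swap_reserve_by_cred() before the new vmspace is installed. A hedged sketch of that check-then-copy shape, with hypothetical types and callbacks:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Sketch of a refcounted resource; details elided. */
struct res { atomic_uint refcnt; };

/*
 * Unshare a resource only when someone else also holds it.  The
 * "== 1" test is stable here only because the caller is the sole
 * party able to create new references to its own resource, which is
 * the property the kernel comment relies on.
 */
static bool
unshare_if_shared(struct res **slot, struct res *(*copy)(struct res *),
    void (*release)(struct res *))
{
	struct res *old = *slot, *new;

	if (atomic_load(&old->refcnt) == 1)
		return (true);		/* already private */
	new = copy(old);
	if (new == NULL)
		return (false);		/* e.g. the charge failed */
	*slot = new;
	release(old);			/* drop our reference to the old copy */
	return (true);
}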
5014 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_lookup()
5017 *var_map = map = entry->object.sub_map; in vm_map_lookup()
5025 prot = entry->protection; in vm_map_lookup()
5029 (entry->eflags & MAP_ENTRY_GUARD) != 0 && in vm_map_lookup()
5030 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 && in vm_map_lookup()
5039 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & in vm_map_lookup()
5042 ("entry %p flags %x", entry, entry->eflags)); in vm_map_lookup()
5044 (entry->max_protection & VM_PROT_WRITE) == 0 && in vm_map_lookup()
5045 (entry->eflags & MAP_ENTRY_COW) == 0) { in vm_map_lookup()
5054 *wired = (entry->wired_count != 0); in vm_map_lookup()
5056 fault_type = entry->protection; in vm_map_lookup()
5057 size = entry->end - entry->start; in vm_map_lookup()
5060 * If the entry was copy-on-write, we either ... in vm_map_lookup()
5062 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup()
5075 * -- one just moved from the map to the new in vm_map_lookup()
5081 if (entry->cred == NULL) { in vm_map_lookup()
5086 cred = curthread->td_ucred; in vm_map_lookup()
5093 entry->cred = cred; in vm_map_lookup()
5095 eobject = entry->object.vm_object; in vm_map_lookup()
5096 vm_object_shadow(&entry->object.vm_object, in vm_map_lookup()
5097 &entry->offset, size, entry->cred, false); in vm_map_lookup()
5098 if (eobject == entry->object.vm_object) { in vm_map_lookup()
5102 swap_release_by_cred(size, entry->cred); in vm_map_lookup()
5103 crfree(entry->cred); in vm_map_lookup()
5105 entry->cred = NULL; in vm_map_lookup()
5106 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vm_map_lookup()
5111 * We're attempting to read a copy-on-write page -- in vm_map_lookup()
5121 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) { in vm_map_lookup()
5124 entry->object.vm_object = vm_object_allocate_anon(atop(size), in vm_map_lookup()
5125 NULL, entry->cred, size); in vm_map_lookup()
5126 entry->offset = 0; in vm_map_lookup()
5127 entry->cred = NULL; in vm_map_lookup()
5133 * copy-on-write or empty, it has been fixed up. in vm_map_lookup()
5135 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup()
5136 *object = entry->object.vm_object; in vm_map_lookup()
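With the entry and its object settled, the faulting virtual address is translated into a page index within the backing object: the distance from the entry's start plus the entry's offset into the object, converted to pages by OFF_TO_IDX(). A stand-alone sketch of that translation, assuming 4 KB pages:

#include <stdint.h>

#define SK_PAGE_SHIFT	12	/* 4 KB pages, assumption for the sketch */

/*
 * Sketch of "*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset)":
 * map a virtual address inside the entry to the index of the page that
 * backs it in the VM object.
 */
static uint64_t
vaddr_to_pindex(uint64_t vaddr, uint64_t entry_start, uint64_t entry_offset)
{
	return (((vaddr - entry_start) + entry_offset) >> SK_PAGE_SHIFT);
}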
5174 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) in vm_map_lookup_locked()
5180 prot = entry->protection; in vm_map_lookup_locked()
5189 *wired = (entry->wired_count != 0); in vm_map_lookup_locked()
5191 fault_type = entry->protection; in vm_map_lookup_locked()
5193 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup_locked()
5195 * Fail if the entry was copy-on-write for a write fault. in vm_map_lookup_locked()
5200 * We're attempting to read a copy-on-write page -- in vm_map_lookup_locked()
5209 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) in vm_map_lookup_locked()
5214 * copy-on-write or empty, it has been fixed up. in vm_map_lookup_locked()
5216 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup_locked()
5217 *object = entry->object.vm_object; in vm_map_lookup_locked()
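vm_map_lookup_locked() is the read-only variant: it cannot create a shadow object, so a write fault on a NEEDS_COPY entry fails, while a read fault simply drops VM_PROT_WRITE from the protection handed back so the caller uses the existing page read-only. A sketch of that decision, with hypothetical stand-ins for the kernel's flag values:

#include <stdint.h>

#define SK_PROT_WRITE	0x2u
#define SK_NEEDS_COPY	0x1u

/*
 * Sketch: given the fault type and the entry flags, compute the
 * protection usable without modifying the map, or return -1 when a
 * copy-on-write write fault would require creating a shadow object
 * (the caller must retry via the full lookup path).
 */
static int
lookup_locked_prot(uint32_t fault_type, uint32_t eflags, uint32_t prot)
{
	if ((eflags & SK_NEEDS_COPY) != 0) {
		if ((fault_type & SK_PROT_WRITE) != 0)
			return (-1);
		prot &= ~SK_PROT_WRITE;	/* read the existing page only */
	}
	return ((int)prot);
}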
5233 * Unlock the main-level map in vm_map_lookup_done()
5256 return (map->pmap); in vm_map_pmap_KBI()
5275 ++map->nupdates; in _vm_map_assert_consistent()
5280 header = prev = &map->header; in _vm_map_assert_consistent()
5282 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5283 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5284 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5285 KASSERT(entry->start < entry->end, in _vm_map_assert_consistent()
5287 (uintmax_t)entry->start, (uintmax_t)entry->end)); in _vm_map_assert_consistent()
5288 KASSERT(entry->left == header || in _vm_map_assert_consistent()
5289 entry->left->start < entry->start, in _vm_map_assert_consistent()
5290 ("map %p left->start = %jx, start = %jx", map, in _vm_map_assert_consistent()
5291 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5292 KASSERT(entry->right == header || in _vm_map_assert_consistent()
5293 entry->start < entry->right->start, in _vm_map_assert_consistent()
5294 ("map %p start = %jx, right->start = %jx", map, in _vm_map_assert_consistent()
5295 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); in _vm_map_assert_consistent()
5296 cur = map->root; in _vm_map_assert_consistent()
5299 if (entry->start < cur->start) { in _vm_map_assert_consistent()
5301 cur = cur->left; in _vm_map_assert_consistent()
5304 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5305 } else if (cur->end <= entry->start) { in _vm_map_assert_consistent()
5307 cur = cur->right; in _vm_map_assert_consistent()
5310 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5314 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5320 KASSERT(entry->max_free == vm_size_max(max_left, max_right), in _vm_map_assert_consistent()
5322 (uintmax_t)entry->max_free, in _vm_map_assert_consistent()
5326 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5327 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5328 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
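The assertions above spell out the map's invariants: entries in list order do not overlap (prev->end <= entry->start < entry->end), each node's children bracket its start, every entry is reachable from the root by ordinary binary search, and the cached max_free equals the larger of the children's values. A compact userspace sketch of the ordering invariants as a recursive check over a binary tree of intervals (a stronger, whole-subtree form of the same checks; max_free is omitted for brevity and all names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of a map-entry node: a half-open interval plus BST links. */
struct node {
	uint64_t start, end;
	struct node *left, *right;
};

/*
 * Verify that every node's interval is non-empty and lies within
 * [lo, hi), that the left subtree ends at or before 'start', and that
 * the right subtree begins at or after 'end'; this implies both the
 * search-tree ordering and the no-overlap property asserted above.
 */
static bool
check_tree(const struct node *n, uint64_t lo, uint64_t hi)
{
	if (n == NULL)
		return (true);
	if (n->start >= n->end || n->start < lo || n->end > hi)
		return (false);
	return (check_tree(n->left, lo, n->start) &&
	    check_tree(n->right, n->end, hi));
}

/* Usage: check_tree(root, 0, UINT64_MAX) over the whole map range. */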
5345 (void *)map->pmap, map->nentries, map->timestamp); in vm_map_print()
5348 prev = &map->header; in vm_map_print()
5351 (void *)entry, (void *)entry->start, (void *)entry->end, in vm_map_print()
5352 entry->eflags); in vm_map_print()
5358 entry->protection, in vm_map_print()
5359 entry->max_protection, in vm_map_print()
5361 entry->inheritance]); in vm_map_print()
5362 if (entry->wired_count != 0) in vm_map_print()
5365 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_print()
5367 (void *)entry->object.sub_map, in vm_map_print()
5368 (uintmax_t)entry->offset); in vm_map_print()
5369 if (prev == &map->header || in vm_map_print()
5370 prev->object.sub_map != in vm_map_print()
5371 entry->object.sub_map) { in vm_map_print()
5373 vm_map_print((vm_map_t)entry->object.sub_map); in vm_map_print()
5374 db_indent -= 2; in vm_map_print()
5377 if (entry->cred != NULL) in vm_map_print()
5378 db_printf(", ruid %d", entry->cred->cr_ruid); in vm_map_print()
5380 (void *)entry->object.vm_object, in vm_map_print()
5381 (uintmax_t)entry->offset); in vm_map_print()
5382 if (entry->object.vm_object && entry->object.vm_object->cred) in vm_map_print()
5383 db_printf(", obj ruid %d charge %jx", in vm_map_print()
5384 entry->object.vm_object->cred->cr_ruid, in vm_map_print()
5385 (uintmax_t)entry->object.vm_object->charge); in vm_map_print()
5386 if (entry->eflags & MAP_ENTRY_COW) in vm_map_print()
5388 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); in vm_map_print()
5391 if (prev == &map->header || in vm_map_print()
5392 prev->object.vm_object != in vm_map_print()
5393 entry->object.vm_object) { in vm_map_print()
5396 entry->object.vm_object, in vm_map_print()
5398 db_indent -= 2; in vm_map_print()
5403 db_indent -= 2; in vm_map_print()
5427 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, in DB_SHOW_COMMAND()
5428 (void *)vmspace_pmap(p->p_vmspace)); in DB_SHOW_COMMAND()
5430 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); in DB_SHOW_COMMAND()