Lines Matching: fast-charge-current-limit
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
106 * Maps consist of an ordered doubly-linked list of simple
107 * entries; a self-adjusting binary search tree of these
120 * another, and then marking both regions as copy-on-write.
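As a minimal sketch of the structure the comment above describes (illustration only, assuming the VM_MAP_ENTRY_FOREACH iterator from vm_map.h; not code from this file), the ordered entries can be walked under the read lock:

    /* Sketch: enumerate every entry's range while holding the read lock. */
    vm_map_entry_t ent;

    vm_map_lock_read(map);
    VM_MAP_ENTRY_FOREACH(ent, map) {
            printf("entry [%#jx, %#jx) prot %#x\n",
                (uintmax_t)ent->start, (uintmax_t)ent->end, ent->protection);
    }
    vm_map_unlock_read(map);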
148 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
149 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
150 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
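Restated purely for readability (a hypothetical helper, not part of the file): an entry is "charged" when it holds a cred itself, or when its backing object holds one and the entry no longer needs a private copy.

    static inline bool
    entry_is_charged(const struct vm_map_entry *e)  /* hypothetical helper */
    {
            return (e->cred != NULL ||
                (e->object.vm_object != NULL &&
                e->object.vm_object->cred != NULL &&
                (e->eflags & MAP_ENTRY_NEEDS_COPY) == 0));
    }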
237 * The worst-case upper bound on the number of kernel map entries that may be
261 * Disable the use of per-CPU buckets: map entry allocation is in vm_map_startup()
293 map = &vm->vm_map; in vmspace_zinit()
296 mtx_init(&map->system_mtx, "vm map (system)", NULL, in vmspace_zinit()
298 sx_init(&map->lock, "vm map (user)"); in vmspace_zinit()
310 KASSERT(vm->vm_map.nentries == 0, in vmspace_zdtor()
311 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); in vmspace_zdtor()
312 KASSERT(vm->vm_map.size == 0, in vmspace_zdtor()
313 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); in vmspace_zdtor()
327 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); in vmspace_alloc()
333 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); in vmspace_alloc()
334 refcount_init(&vm->vm_refcnt, 1); in vmspace_alloc()
335 vm->vm_shm = NULL; in vmspace_alloc()
336 vm->vm_swrss = 0; in vmspace_alloc()
337 vm->vm_tsize = 0; in vmspace_alloc()
338 vm->vm_dsize = 0; in vmspace_alloc()
339 vm->vm_ssize = 0; in vmspace_alloc()
340 vm->vm_taddr = 0; in vmspace_alloc()
341 vm->vm_daddr = 0; in vmspace_alloc()
342 vm->vm_maxsaddr = 0; in vmspace_alloc()
378 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), in vmspace_dofree()
379 vm_map_max(&vm->vm_map)); in vmspace_dofree()
382 vm->vm_map.pmap = NULL; in vmspace_dofree()
393 if (refcount_release(&vm->vm_refcnt)) in vmspace_free()
403 vm = p->p_vmspace; in vmspace_exitfree()
404 p->p_vmspace = NULL; in vmspace_exitfree()
417 p = td->td_proc; in vmspace_exit()
418 vm = p->p_vmspace; in vmspace_exit()
428 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { in vmspace_exit()
429 if (p->p_vmspace != &vmspace0) { in vmspace_exit()
431 p->p_vmspace = &vmspace0; in vmspace_exit()
435 released = refcount_release(&vm->vm_refcnt); in vmspace_exit()
442 if (p->p_vmspace != vm) { in vmspace_exit()
444 p->p_vmspace = vm; in vmspace_exit()
450 p->p_vmspace = &vmspace0; in vmspace_exit()
469 vm = p->p_vmspace; in vmspace_acquire_ref()
470 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) { in vmspace_acquire_ref()
474 if (vm != p->p_vmspace) { in vmspace_acquire_ref()
493 * a result, the 'newvm' vmspace always has a non-zero reference
505 KASSERT(refcount_load(&newvm->vm_refcnt) > 0, in vmspace_switch_aio()
508 oldvm = curproc->p_vmspace; in vmspace_switch_aio()
515 curproc->p_vmspace = newvm; in vmspace_switch_aio()
516 refcount_acquire(&newvm->vm_refcnt); in vmspace_switch_aio()
528 if (map->system_map) in _vm_map_lock()
529 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock()
531 sx_xlock_(&map->lock, file, line); in _vm_map_lock()
532 map->timestamp++; in _vm_map_lock()
542 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0) in vm_map_entry_set_vnode_text()
544 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_set_vnode_text()
546 object = entry->object.vm_object; in vm_map_entry_set_vnode_text()
548 if ((object->flags & OBJ_ANON) != 0) in vm_map_entry_set_vnode_text()
549 object = object->handle; in vm_map_entry_set_vnode_text()
551 KASSERT(object->backing_object == NULL, in vm_map_entry_set_vnode_text()
552 ("non-anon object %p shadows", object)); in vm_map_entry_set_vnode_text()
554 entry, entry->object.vm_object)); in vm_map_entry_set_vnode_text()
589 entry = td->td_map_def_user; in vm_map_process_deferred()
590 td->td_map_def_user = NULL; in vm_map_process_deferred()
592 next = entry->defer_next; in vm_map_process_deferred()
593 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | in vm_map_process_deferred()
596 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) { in vm_map_process_deferred()
601 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_process_deferred()
603 object = entry->object.vm_object; in vm_map_process_deferred()
605 vm_pager_release_writecount(object, entry->start, in vm_map_process_deferred()
606 entry->end); in vm_map_process_deferred()
619 if (map->system_map) in _vm_map_assert_locked()
620 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_assert_locked()
622 sx_assert_(&map->lock, SA_XLOCKED, file, line); in _vm_map_assert_locked()
643 if (map->nupdates > map->nentries) { \
645 map->nupdates = 0; \
662 if (map->system_map) { in _vm_map_unlock()
664 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) { in _vm_map_unlock()
666 map->flags &= ~MAP_REPLENISH; in _vm_map_unlock()
669 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock()
671 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock()
680 if (map->system_map) in _vm_map_lock_read()
681 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock_read()
683 sx_slock_(&map->lock, file, line); in _vm_map_lock_read()
690 if (map->system_map) { in _vm_map_unlock_read()
691 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_read()
693 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_read()
695 sx_sunlock_(&map->lock, file, line); in _vm_map_unlock_read()
705 error = map->system_map ? in _vm_map_trylock()
706 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock()
707 !sx_try_xlock_(&map->lock, file, line); in _vm_map_trylock()
709 map->timestamp++; in _vm_map_trylock()
718 error = map->system_map ? in _vm_map_trylock_read()
719 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock_read()
720 !sx_try_slock_(&map->lock, file, line); in _vm_map_trylock_read()
729 * non-zero value if the upgrade fails. If the upgrade fails, the map is
739 if (map->system_map) { in _vm_map_lock_upgrade()
740 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_upgrade()
742 if (!sx_try_upgrade_(&map->lock, file, line)) { in _vm_map_lock_upgrade()
743 last_timestamp = map->timestamp; in _vm_map_lock_upgrade()
744 sx_sunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
750 sx_xlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
751 if (last_timestamp != map->timestamp) { in _vm_map_lock_upgrade()
752 sx_xunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
757 map->timestamp++; in _vm_map_lock_upgrade()
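A sketch of the usual calling pattern, assuming (as the comment above states) that a failed upgrade leaves the map unlocked:

    /* Sketch: retry after a failed read-to-write upgrade. */
    vm_map_lock_read(map);
    /* ... read-only checks ... */
    if (vm_map_lock_upgrade(map) != 0) {
            /* The read lock was dropped; take the write lock and re-validate. */
            vm_map_lock(map);
    }
    /* ... modify the map ... */
    vm_map_unlock(map);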
765 if (map->system_map) { in _vm_map_lock_downgrade()
766 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_lock_downgrade()
768 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_downgrade()
771 sx_downgrade_(&map->lock, file, line); in _vm_map_lock_downgrade()
778 * Returns a non-zero value if the caller holds a write (exclusive) lock
785 if (map->system_map) in vm_map_locked()
786 return (mtx_owned(&map->system_mtx)); in vm_map_locked()
788 return (sx_xlocked(&map->lock)); in vm_map_locked()
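Because it reports write-lock ownership for either flavor of map, vm_map_locked() is convenient in assertions; a one-line sketch:

    KASSERT(vm_map_locked(map), ("vm map %p not write-locked", map));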
811 if (map->system_map) { in _vm_map_unlock_and_wait()
812 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_and_wait()
814 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_and_wait()
816 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock_and_wait()
818 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", in _vm_map_unlock_and_wait()
839 wakeup(&map->root); in vm_map_wakeup()
847 map->busy++; in vm_map_busy()
855 KASSERT(map->busy, ("vm_map_unbusy: not busy")); in vm_map_unbusy()
856 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { in vm_map_unbusy()
858 wakeup(&map->busy); in vm_map_unbusy()
867 while (map->busy) { in vm_map_wait_busy()
869 if (map->system_map) in vm_map_wait_busy()
870 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); in vm_map_wait_busy()
872 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); in vm_map_wait_busy()
874 map->timestamp++; in vm_map_wait_busy()
891 map->header.eflags = MAP_ENTRY_HEADER; in _vm_map_init()
892 map->needs_wakeup = FALSE; in _vm_map_init()
893 map->system_map = 0; in _vm_map_init()
894 map->pmap = pmap; in _vm_map_init()
895 map->header.end = min; in _vm_map_init()
896 map->header.start = max; in _vm_map_init()
897 map->flags = 0; in _vm_map_init()
898 map->header.left = map->header.right = &map->header; in _vm_map_init()
899 map->root = NULL; in _vm_map_init()
900 map->timestamp = 0; in _vm_map_init()
901 map->busy = 0; in _vm_map_init()
902 map->anon_loc = 0; in _vm_map_init()
904 map->nupdates = 0; in _vm_map_init()
913 mtx_init(&map->system_mtx, "vm map (system)", NULL, in vm_map_init()
915 sx_init(&map->lock, "vm map (user)"); in vm_map_init()
926 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); in vm_map_entry_dispose()
956 kernel_map->flags |= MAP_REPLENISH; in vm_map_entry_create()
960 if (map->system_map) { in vm_map_entry_create()
979 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | in vm_map_entry_set_behavior()
994 return (root->left != left_ancestor ? in vm_map_entry_max_free_left()
995 root->left->max_free : root->start - left_ancestor->end); in vm_map_entry_max_free_left()
1002 return (root->right != right_ancestor ? in vm_map_entry_max_free_right()
1003 root->right->max_free : right_ancestor->start - root->end); in vm_map_entry_max_free_right()
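Taken together, the two helpers above give the recurrence that keeps max_free current: a node's max_free is the largest free gap anywhere in its subtree, with a missing child contributing the gap between the node and its in-order neighbor. A sketch of that recurrence, restating the updates used later in this file:

    /* max_free(root) = max(largest gap on the left, largest gap on the right) */
    root->max_free = vm_size_max(
        vm_map_entry_max_free_left(root, llist),
        vm_map_entry_max_free_right(root, rlist));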
1018 prior = entry->left; in vm_map_entry_pred()
1019 if (prior->right->start < entry->start) { in vm_map_entry_pred()
1021 prior = prior->right; in vm_map_entry_pred()
1022 while (prior->right != entry); in vm_map_entry_pred()
1039 * Infer root->right->max_free == root->max_free when \
1040 * y->max_free < root->max_free || root->max_free == 0. \
1043 y = root->left; \
1044 max_free = root->max_free; \
1049 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \
1053 z = y->right; \
1055 root->left = z; \
1056 y->right = root; \
1057 if (max_free < y->max_free) \
1058 root->max_free = max_free = \
1059 vm_size_max(max_free, z->max_free); \
1060 } else if (max_free < y->max_free) \
1061 root->max_free = max_free = \
1062 vm_size_max(max_free, root->start - y->end);\
1064 y = root->left; \
1066 /* Copy right->max_free. Put root on rlist. */ \
1067 root->max_free = max_free; \
1070 root->left = rlist; \
1080 * Infer root->left->max_free == root->max_free when \
1081 * y->max_free < root->max_free || root->max_free == 0. \
1084 y = root->right; \
1085 max_free = root->max_free; \
1090 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \
1094 z = y->left; \
1096 root->right = z; \
1097 y->left = root; \
1098 if (max_free < y->max_free) \
1099 root->max_free = max_free = \
1100 vm_size_max(max_free, z->max_free); \
1101 } else if (max_free < y->max_free) \
1102 root->max_free = max_free = \
1103 vm_size_max(max_free, y->start - root->end);\
1105 y = root->right; \
1107 /* Copy left->max_free. Put root on llist. */ \
1108 root->max_free = max_free; \
1111 root->right = llist; \
1119 * subtrees with root->max_free < length as empty trees. llist and rlist are
1120 * the two sides in reverse order (bottom-up), with llist linked by the right
1122 * lists terminated by &map->header. This function, and the subsequent call to
1124 * values in &map->header.
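Based on the description above, a sketch (illustration only) of how the left-hand broken tree could be walked after the split: entries below the search address hang off llist via their right pointers, nearest entry first, and the chain ends at &map->header.

    /* Sketch: walk the left-hand broken tree, nearest entry first. */
    for (e = llist; e != &map->header; e = e->right) {
            /* Each e here lies entirely below the split address. */
    }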
1132 left = right = &map->header; in vm_map_splay_split()
1133 root = map->root; in vm_map_splay_split()
1134 while (root != NULL && root->max_free >= length) { in vm_map_splay_split()
1135 KASSERT(left->end <= root->start && in vm_map_splay_split()
1136 root->end <= right->start, in vm_map_splay_split()
1138 if (addr < root->start) { in vm_map_splay_split()
1140 y->max_free >= length && addr < y->start); in vm_map_splay_split()
1141 } else if (addr >= root->end) { in vm_map_splay_split()
1143 y->max_free >= length && addr >= y->end); in vm_map_splay_split()
1158 hi = root->right == right ? NULL : root->right; in vm_map_splay_findnext()
1173 lo = root->left == left ? NULL : root->left; in vm_map_splay_findprev()
1203 * llist->max_free and max_free. Update with the in vm_map_splay_merge_left_walk()
1206 llist->max_free = max_free = in vm_map_splay_merge_left_walk()
1207 vm_size_max(llist->max_free, max_free); in vm_map_splay_merge_left_walk()
1208 vm_map_entry_swap(&llist->right, &tail); in vm_map_splay_merge_left_walk()
1211 root->left = tail; in vm_map_splay_merge_left_walk()
1224 max_free = root->start - llist->end; in vm_map_splay_merge_pred()
1229 root->left = header; in vm_map_splay_merge_pred()
1230 header->right = root; in vm_map_splay_merge_pred()
1247 root->left == llist ? root : root->left, in vm_map_splay_merge_left()
1260 * rlist->max_free and max_free. Update with the in vm_map_splay_merge_right_walk()
1263 rlist->max_free = max_free = in vm_map_splay_merge_right_walk()
1264 vm_size_max(rlist->max_free, max_free); in vm_map_splay_merge_right_walk()
1265 vm_map_entry_swap(&rlist->left, &tail); in vm_map_splay_merge_right_walk()
1268 root->right = tail; in vm_map_splay_merge_right_walk()
1281 max_free = rlist->start - root->end; in vm_map_splay_merge_succ()
1286 root->right = header; in vm_map_splay_merge_succ()
1287 header->left = root; in vm_map_splay_merge_succ()
1304 root->right == rlist ? root : root->right, in vm_map_splay_merge_right()
1313 * The Sleator and Tarjan top-down splay algorithm with the
1314 * following variation. Max_free must be computed bottom-up, so
1341 header = &map->header; in vm_map_splay()
1352 llist = root->right; in vm_map_splay()
1361 rlist = root->left; in vm_map_splay()
1368 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_splay()
1369 map->root = root; in vm_map_splay()
1391 map->nentries, entry); in vm_map_entry_link()
1393 map->nentries++; in vm_map_entry_link()
1394 header = &map->header; in vm_map_entry_link()
1395 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_link()
1403 } else if (entry->start == root->start) { in vm_map_entry_link()
1410 KASSERT(entry->end < root->end, in vm_map_entry_link()
1413 if ((root->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_entry_link()
1415 root->offset += entry->end - root->start; in vm_map_entry_link()
1416 root->start = entry->end; in vm_map_entry_link()
1418 max_free_right = root->max_free = vm_size_max( in vm_map_entry_link()
1428 KASSERT(entry->end == root->end, in vm_map_entry_link()
1431 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_entry_link()
1433 entry->offset += entry->start - root->start; in vm_map_entry_link()
1434 root->end = entry->start; in vm_map_entry_link()
1435 max_free_left = root->max_free = vm_size_max( in vm_map_entry_link()
1440 entry->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_link()
1441 map->root = entry; in vm_map_entry_link()
1458 header = &map->header; in vm_map_entry_unlink()
1459 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_unlink()
1466 rlist->start = root->start; in vm_map_entry_unlink()
1467 MPASS((rlist->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_entry_unlink()
1469 rlist->offset = root->offset; in vm_map_entry_unlink()
1473 llist = root->right; in vm_map_entry_unlink()
1478 rlist = root->left; in vm_map_entry_unlink()
1482 header->left = header->right = header; in vm_map_entry_unlink()
1486 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_unlink()
1487 map->root = root; in vm_map_entry_unlink()
1489 map->nentries--; in vm_map_entry_unlink()
1491 map->nentries, entry); in vm_map_entry_unlink()
1508 header = &map->header; in vm_map_entry_resize()
1509 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_resize()
1512 entry->end += grow_amount; in vm_map_entry_resize()
1513 root->max_free = vm_size_max( in vm_map_entry_resize()
1516 map->root = root; in vm_map_entry_resize()
1519 __func__, map, map->nentries, entry); in vm_map_entry_resize()
1545 header = &map->header; in vm_map_lookup_entry()
1546 cur = map->root; in vm_map_lookup_entry()
1551 if (address >= cur->start && cur->end > address) { in vm_map_lookup_entry()
1556 sx_try_upgrade(&map->lock)) { in vm_map_lookup_entry()
1566 sx_downgrade(&map->lock); in vm_map_lookup_entry()
1574 if (address < cur->start) { in vm_map_lookup_entry()
1579 return (address < cur->end); in vm_map_lookup_entry()
1587 if (address < cur->start) { in vm_map_lookup_entry()
1589 cur = cur->left; in vm_map_lookup_entry()
1592 } else if (cur->end <= address) { in vm_map_lookup_entry()
1594 cur = cur->right; in vm_map_lookup_entry()
1642 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE | in vm_map_insert1()
1657 if (next_entry->start < end) in vm_map_insert1()
1697 bdry = pagesizes[bidx] - 1; in vm_map_insert1()
1708 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) in vm_map_insert1()
1712 object->cred == NULL, in vm_map_insert1()
1714 cred = curthread->td_ucred; in vm_map_insert1()
1729 if ((object->flags & OBJ_ANON) != 0) { in vm_map_insert1()
1731 if (object->ref_count > 1 || object->shadow_count != 0) in vm_map_insert1()
1735 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1739 prev_entry->end == start && (prev_entry->cred == cred || in vm_map_insert1()
1740 (prev_entry->object.vm_object != NULL && in vm_map_insert1()
1741 prev_entry->object.vm_object->cred == cred)) && in vm_map_insert1()
1742 vm_object_coalesce(prev_entry->object.vm_object, in vm_map_insert1()
1743 prev_entry->offset, in vm_map_insert1()
1744 (vm_size_t)(prev_entry->end - prev_entry->start), in vm_map_insert1()
1745 (vm_size_t)(end - prev_entry->end), cred != NULL && in vm_map_insert1()
1752 if (prev_entry->inheritance == inheritance && in vm_map_insert1()
1753 prev_entry->protection == prot && in vm_map_insert1()
1754 prev_entry->max_protection == max && in vm_map_insert1()
1755 prev_entry->wired_count == 0) { in vm_map_insert1()
1756 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1759 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1760 map->size += end - prev_entry->end; in vm_map_insert1()
1762 end - prev_entry->end); in vm_map_insert1()
1774 object = prev_entry->object.vm_object; in vm_map_insert1()
1775 offset = prev_entry->offset + in vm_map_insert1()
1776 (prev_entry->end - prev_entry->start); in vm_map_insert1()
1778 if (cred != NULL && object != NULL && object->cred != NULL && in vm_map_insert1()
1779 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_insert1()
1791 new_entry->start = start; in vm_map_insert1()
1792 new_entry->end = end; in vm_map_insert1()
1793 new_entry->cred = NULL; in vm_map_insert1()
1795 new_entry->eflags = protoeflags; in vm_map_insert1()
1796 new_entry->object.vm_object = object; in vm_map_insert1()
1797 new_entry->offset = offset; in vm_map_insert1()
1799 new_entry->inheritance = inheritance; in vm_map_insert1()
1800 new_entry->protection = prot; in vm_map_insert1()
1801 new_entry->max_protection = max; in vm_map_insert1()
1802 new_entry->wired_count = 0; in vm_map_insert1()
1803 new_entry->wiring_thread = NULL; in vm_map_insert1()
1804 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; in vm_map_insert1()
1805 new_entry->next_read = start; in vm_map_insert1()
1809 new_entry->cred = cred; in vm_map_insert1()
1815 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1816 map->size += new_entry->end - new_entry->start; in vm_map_insert1()
1829 end - start, cow & MAP_PREFAULT_PARTIAL); in vm_map_insert1()
1843 * If object is non-NULL, ref count must be bumped by caller
1871 * vm_map_max(map)-length+1 if insufficient space.
1887 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) in vm_map_findspace()
1888 return (vm_map_max(map) - length + 1); in vm_map_findspace()
1891 if (map->root == NULL) in vm_map_findspace()
1898 * enough; otherwise set gap_end to start, skip gap-checking, and move in vm_map_findspace()

1901 header = &map->header; in vm_map_findspace()
1903 gap_end = rlist->start; in vm_map_findspace()
1905 start = root->end; in vm_map_findspace()
1906 if (root->right != rlist) in vm_map_findspace()
1912 rlist = root->left; in vm_map_findspace()
1917 llist = root->right; in vm_map_findspace()
1921 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_findspace()
1922 map->root = root; in vm_map_findspace()
1924 if (length <= gap_end - start) in vm_map_findspace()
1928 if (root->right == header || length > root->right->max_free) in vm_map_findspace()
1929 return (vm_map_max(map) - length + 1); in vm_map_findspace()
1932 * Splay for the least large-enough gap in the right subtree. in vm_map_findspace()
1947 llist = root->right; in vm_map_findspace()
1950 root->max_free = vm_size_max(max_free_left, in vm_map_findspace()
1954 rlist = y->left; in vm_map_findspace()
1955 y->max_free = vm_size_max( in vm_map_findspace()
1958 root->max_free = vm_size_max(max_free_left, y->max_free); in vm_map_findspace()
1960 map->root = root; in vm_map_findspace()
1962 return (root->end); in vm_map_findspace()
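A sketch of how a caller checks the sentinel return value described above (vm_map_max(map) - length + 1 when no gap is large enough); the surrounding map locking is assumed:

    /* Sketch: ask for a first-fit gap of `length' bytes, searching from the bottom. */
    vm_offset_t start;

    start = vm_map_findspace(map, vm_map_min(map), length);
    if (start + length > vm_map_max(map))
            return (KERN_NO_SPACE);         /* sentinel: no gap large enough */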
1976 ("vm_map_fixed: non-NULL backing object for stack")); in vm_map_fixed()
2033 * specified alignment. Performs an address-ordered, first-fit search from
2112 * first-fit from the specified address; the region found is
2115 * If object is non-NULL, ref count must be bumped by caller
2145 ("non-NULL backing object for stack")); in vm_map_find_locked()
2149 (object->flags & OBJ_COLORED) == 0)) in vm_map_find_locked()
2156 en_aslr = (map->flags & MAP_ASLR) != 0; in vm_map_find_locked()
2158 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && in vm_map_find_locked()
2165 (map->flags & MAP_ASLR_IGNSTART) != 0) in vm_map_find_locked()
2169 curr_min_addr = map->anon_loc; in vm_map_find_locked()
2187 * perform a first-fit search of the available address in vm_map_find_locked()
2204 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? in vm_map_find_locked()
2228 pidx--; in vm_map_find_locked()
2288 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || in vm_map_find_locked()
2289 *addr < map->anon_loc)) in vm_map_find_locked()
2290 map->anon_loc = *addr; in vm_map_find_locked()
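A hedged usage sketch of the first-fit entry point described in the comment before vm_map_find() above (argument order assumed from the in-tree prototype; treat as illustrative, not authoritative):

    /* Sketch: map `size' bytes of anonymous memory anywhere in `map'. */
    vm_offset_t addr;
    int rv;

    addr = vm_map_min(map);
    rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
        VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0);
    if (rv != KERN_SUCCESS)
            return (rv);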
2341 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || in vm_map_mergeable_neighbors()
2342 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, in vm_map_mergeable_neighbors()
2345 return (prev->end == entry->start && in vm_map_mergeable_neighbors()
2346 prev->object.vm_object == entry->object.vm_object && in vm_map_mergeable_neighbors()
2347 (prev->object.vm_object == NULL || in vm_map_mergeable_neighbors()
2348 prev->offset + (prev->end - prev->start) == entry->offset) && in vm_map_mergeable_neighbors()
2349 prev->eflags == entry->eflags && in vm_map_mergeable_neighbors()
2350 prev->protection == entry->protection && in vm_map_mergeable_neighbors()
2351 prev->max_protection == entry->max_protection && in vm_map_mergeable_neighbors()
2352 prev->inheritance == entry->inheritance && in vm_map_mergeable_neighbors()
2353 prev->wired_count == entry->wired_count && in vm_map_mergeable_neighbors()
2354 prev->cred == entry->cred); in vm_map_mergeable_neighbors()
2365 * kept without causing a lock-order reversal with the vnode lock. in vm_map_merged_neighbor_dispose()
2368 * object->un_pager.vnp.writemappings, the writemappings value in vm_map_merged_neighbor_dispose()
2371 if (entry->object.vm_object != NULL) in vm_map_merged_neighbor_dispose()
2372 vm_object_deallocate(entry->object.vm_object); in vm_map_merged_neighbor_dispose()
2373 if (entry->cred != NULL) in vm_map_merged_neighbor_dispose()
2374 crfree(entry->cred); in vm_map_merged_neighbor_dispose()
2394 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && in vm_map_try_merge_entries()
2413 KASSERT(entry->object.vm_object == NULL, in vm_map_entry_back()
2415 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_back()
2417 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, in vm_map_entry_back()
2418 entry->cred, entry->end - entry->start); in vm_map_entry_back()
2419 entry->object.vm_object = object; in vm_map_entry_back()
2420 entry->offset = 0; in vm_map_entry_back()
2421 entry->cred = NULL; in vm_map_entry_back()
2435 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_charge_object()
2437 if (entry->object.vm_object == NULL && !map->system_map && in vm_map_entry_charge_object()
2438 (entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_entry_charge_object()
2440 else if (entry->object.vm_object != NULL && in vm_map_entry_charge_object()
2441 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && in vm_map_entry_charge_object()
2442 entry->cred != NULL) { in vm_map_entry_charge_object()
2443 VM_OBJECT_WLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2444 KASSERT(entry->object.vm_object->cred == NULL, in vm_map_entry_charge_object()
2446 entry->object.vm_object->cred = entry->cred; in vm_map_entry_charge_object()
2447 entry->object.vm_object->charge = entry->end - entry->start; in vm_map_entry_charge_object()
2448 VM_OBJECT_WUNLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2449 entry->cred = NULL; in vm_map_entry_charge_object()
2474 if (new_entry->cred != NULL) in vm_map_entry_clone()
2475 crhold(entry->cred); in vm_map_entry_clone()
2476 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { in vm_map_entry_clone()
2477 vm_object_reference(new_entry->object.vm_object); in vm_map_entry_clone()
2480 * The object->un_pager.vnp.writemappings for the object of in vm_map_entry_clone()
2482 * virtual pages are re-distributed among the clipped entries, in vm_map_entry_clone()
2502 if (!map->system_map) in vm_map_clip_start()
2507 if (startaddr <= entry->start) in vm_map_clip_start()
2511 KASSERT(entry->end > startaddr && entry->start < startaddr, in vm_map_clip_start()
2516 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_start()
2526 new_entry->end = startaddr; in vm_map_clip_start()
2545 if (!map->system_map) in vm_map_lookup_clip_start()
2575 if (!map->system_map) in vm_map_clip_end()
2580 if (endaddr >= entry->end) in vm_map_clip_end()
2584 KASSERT(entry->start < endaddr && entry->end > endaddr, in vm_map_clip_end()
2589 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_end()
2599 new_entry->start = endaddr; in vm_map_clip_end()
2636 submap->flags |= MAP_IS_SUB_MAP; in vm_map_submap()
2641 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && in vm_map_submap()
2642 (entry->eflags & MAP_ENTRY_COW) == 0 && in vm_map_submap()
2643 entry->object.vm_object == NULL) { in vm_map_submap()
2650 entry->object.sub_map = submap; in vm_map_submap()
2651 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; in vm_map_submap()
2659 submap->flags &= ~MAP_IS_SUB_MAP; in vm_map_submap()
2674 * object's memory-resident pages. No further physical pages are
2677 * limited number of page mappings are created at the low-end of the
2693 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2695 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2696 pmap_object_init_pt(map->pmap, addr, object, pindex, in vm_map_pmap_enter()
2706 if (psize + pindex > object->size) { in vm_map_pmap_enter()
2707 if (pindex >= object->size) { in vm_map_pmap_enter()
2711 psize = object->size - pindex; in vm_map_pmap_enter()
2725 p != NULL && (tmpidx = p->pindex - pindex) < psize; in vm_map_pmap_enter()
2744 for (psind = p->psind; psind > 0; psind--) { in vm_map_pmap_enter()
2746 (pagesizes[psind] - 1)) == 0) { in vm_map_pmap_enter()
2747 mask = atop(pagesizes[psind]) - 1; in vm_map_pmap_enter()
2758 pmap_enter_object(map->pmap, start, addr + in vm_map_pmap_enter()
2764 pmap_enter_object(map->pmap, start, addr + ptoa(psize), in vm_map_pmap_enter()
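A sketch of how the prefault described above is typically requested (prototype and flag assumed from the surrounding code; object, pindex, addr, and size stand in for the caller's mapping parameters):

    /* Sketch: prefault resident pages of `object' into [addr, addr + size). */
    vm_map_pmap_enter(map, addr, VM_PROT_READ, object, pindex, size,
        MAP_PREFAULT_PARTIAL);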
2775 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); in vm_map_protect_guard()
2776 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP | in vm_map_protect_guard()
2780 old_prot = PROT_EXTRACT(entry->offset); in vm_map_protect_guard()
2782 entry->offset = PROT_MAX(new_maxprot) | in vm_map_protect_guard()
2786 entry->offset = new_prot | PROT_MAX( in vm_map_protect_guard()
2787 PROT_MAX_EXTRACT(entry->offset)); in vm_map_protect_guard()
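In other words, a guard (stack gap) entry has no protection fields of its own; the otherwise unused offset field packs both values. A sketch of the encoding used above and again in vm_map_growstack():

    /* Sketch: pack and unpack a gap entry's protections in entry->offset. */
    entry->offset = prot | PROT_MAX(maxprot);       /* store both values    */
    prot    = PROT_EXTRACT(entry->offset);          /* current protection   */
    maxprot = PROT_MAX_EXTRACT(entry->offset);      /* maximum protection   */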
2822 if ((map->flags & MAP_WXORX) != 0 && in vm_map_protect()
2843 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { in vm_map_protect()
2851 while (!CONTAINS_BITS(first_entry->eflags, in vm_map_protect()
2855 start = first_entry->start; in vm_map_protect()
2866 for (entry = first_entry; entry->start < end; in vm_map_protect()
2868 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_protect()
2872 if ((entry->eflags & (MAP_ENTRY_GUARD | in vm_map_protect()
2876 max_prot = (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_protect()
2878 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; in vm_map_protect()
2883 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) in vm_map_protect()
2888 * Postpone the operation until all in-transition map entries have in vm_map_protect()
2889 * stabilized. An in-transition entry might already have its pages in vm_map_protect()
2895 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_protect()
2902 * private (i.e., copy-on-write) mappings that are transitioning from in vm_map_protect()
2903 * read-only to read/write access. If a reservation fails, break out in vm_map_protect()
2912 for (entry = first_entry; entry->start < end; in vm_map_protect()
2921 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || in vm_map_protect()
2923 (entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2926 cred = curthread->td_ucred; in vm_map_protect()
2927 obj = entry->object.vm_object; in vm_map_protect()
2930 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { in vm_map_protect()
2931 if (!swap_reserve(entry->end - entry->start)) { in vm_map_protect()
2933 end = entry->end; in vm_map_protect()
2937 entry->cred = cred; in vm_map_protect()
2942 if ((obj->flags & OBJ_SWAP) == 0) { in vm_map_protect()
2948 * Charge for the whole object allocation now, since in vm_map_protect()
2949 * we cannot distinguish between non-charged and in vm_map_protect()
2952 KASSERT(obj->charge == 0, in vm_map_protect()
2955 if (!swap_reserve(ptoa(obj->size))) { in vm_map_protect()
2958 end = entry->end; in vm_map_protect()
2963 obj->cred = cred; in vm_map_protect()
2964 obj->charge = ptoa(obj->size); in vm_map_protect()
2974 entry->start < end; in vm_map_protect()
2980 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_protect()
2986 old_prot = entry->protection; in vm_map_protect()
2989 entry->max_protection = new_maxprot; in vm_map_protect()
2990 entry->protection = new_maxprot & old_prot; in vm_map_protect()
2993 entry->protection = new_prot; in vm_map_protect()
2999 * copy-on-write and enable write access in the physical map. in vm_map_protect()
3001 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && in vm_map_protect()
3002 (entry->protection & VM_PROT_WRITE) != 0 && in vm_map_protect()
3008 * about copy-on-write here. in vm_map_protect()
3010 if ((old_prot & ~entry->protection) != 0) { in vm_map_protect()
3011 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ in vm_map_protect()
3013 pmap_protect(map->pmap, entry->start, in vm_map_protect()
3014 entry->end, in vm_map_protect()
3015 entry->protection & MASK(entry)); in vm_map_protect()
3046 * various clipping operations. Otherwise we only need a read-lock in vm_map_madvise()
3092 for (; entry->start < end; prev_entry = entry, in vm_map_madvise()
3094 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_map_madvise()
3117 entry->eflags |= MAP_ENTRY_NOSYNC; in vm_map_madvise()
3120 entry->eflags &= ~MAP_ENTRY_NOSYNC; in vm_map_madvise()
3123 entry->eflags |= MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3126 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3147 for (; entry->start < end; in vm_map_madvise()
3151 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | in vm_map_madvise()
3158 * we hold the VM map read-locked, neither the in vm_map_madvise()
3163 entry->object.vm_object != NULL && in vm_map_madvise()
3164 entry->object.vm_object->backing_object != NULL) in vm_map_madvise()
3167 pstart = OFF_TO_IDX(entry->offset); in vm_map_madvise()
3168 pend = pstart + atop(entry->end - entry->start); in vm_map_madvise()
3169 useStart = entry->start; in vm_map_madvise()
3170 useEnd = entry->end; in vm_map_madvise()
3172 if (entry->start < start) { in vm_map_madvise()
3173 pstart += atop(start - entry->start); in vm_map_madvise()
3176 if (entry->end > end) { in vm_map_madvise()
3177 pend -= atop(entry->end - end); in vm_map_madvise()
3195 pmap_advise(map->pmap, useStart, useEnd, in vm_map_madvise()
3198 vm_object_madvise(entry->object.vm_object, pstart, in vm_map_madvise()
3202 * Pre-populate paging structures in the in vm_map_madvise()
3207 entry->wired_count == 0) { in vm_map_madvise()
3210 entry->protection, in vm_map_madvise()
3211 entry->object.vm_object, in vm_map_madvise()
3213 ptoa(pend - pstart), in vm_map_madvise()
3254 if (vm_map_lookup_entry(map, end - 1, &lentry)) { in vm_map_inherit()
3260 for (entry = start_entry; entry->start < end; in vm_map_inherit()
3262 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) in vm_map_inherit()
3269 for (entry = start_entry; entry->start < end; prev_entry = entry, in vm_map_inherit()
3271 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", in vm_map_inherit()
3272 entry, (uintmax_t)entry->end, (uintmax_t)end)); in vm_map_inherit()
3273 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || in vm_map_inherit()
3275 entry->inheritance = new_inheritance; in vm_map_inherit()
3289 * another held the lock, lookup a possibly-changed entry at or after the
3301 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_entry_in_transition()
3302 ("not in-tranition map entry %p", in_entry)); in vm_map_entry_in_transition()
3306 start = MAX(in_start, in_entry->start); in vm_map_entry_in_transition()
3307 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_entry_in_transition()
3308 last_timestamp = map->timestamp; in vm_map_entry_in_transition()
3315 if (last_timestamp + 1 == map->timestamp) in vm_map_entry_in_transition()
3361 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_unwire()
3362 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_unwire()
3391 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_unwire()
3392 entry->wiring_thread == NULL, in vm_map_unwire()
3394 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3395 entry->wiring_thread = curthread; in vm_map_unwire()
3402 entry->end < end && next_entry->start > entry->end) { in vm_map_unwire()
3403 end = entry->end; in vm_map_unwire()
3412 end = entry->end; in vm_map_unwire()
3427 for (; entry->start < end; in vm_map_unwire()
3438 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_unwire()
3439 entry->wiring_thread != curthread) { in vm_map_unwire()
3446 (entry->eflags & MAP_ENTRY_USER_WIRED))) { in vm_map_unwire()
3447 if (entry->wired_count == 1) in vm_map_unwire()
3450 entry->wired_count--; in vm_map_unwire()
3452 entry->eflags &= ~MAP_ENTRY_USER_WIRED; in vm_map_unwire()
3454 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_unwire()
3455 ("vm_map_unwire: in-transition flag missing %p", entry)); in vm_map_unwire()
3456 KASSERT(entry->wiring_thread == curthread, in vm_map_unwire()
3458 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3459 entry->wiring_thread = NULL; in vm_map_unwire()
3460 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_unwire()
3461 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_unwire()
3508 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && in vm_map_wire_entry_failure()
3509 entry->wired_count == 1, in vm_map_wire_entry_failure()
3511 KASSERT(failed_addr < entry->end, in vm_map_wire_entry_failure()
3518 if (failed_addr > entry->start) { in vm_map_wire_entry_failure()
3519 pmap_unwire(map->pmap, entry->start, failed_addr); in vm_map_wire_entry_failure()
3520 vm_object_unwire(entry->object.vm_object, entry->offset, in vm_map_wire_entry_failure()
3521 failed_addr - entry->start, PQ_ACTIVE); in vm_map_wire_entry_failure()
3525 * Assign an out-of-range value to represent the failure to wire this in vm_map_wire_entry_failure()
3528 entry->wired_count = -1; in vm_map_wire_entry_failure()
3575 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_wire_locked()
3576 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_wire_locked()
3603 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_wire_locked()
3604 entry->wiring_thread == NULL, in vm_map_wire_locked()
3606 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_wire_locked()
3607 entry->wiring_thread = curthread; in vm_map_wire_locked()
3608 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 in vm_map_wire_locked()
3609 || (entry->protection & prot) != prot) { in vm_map_wire_locked()
3610 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; in vm_map_wire_locked()
3612 end = entry->end; in vm_map_wire_locked()
3616 } else if (entry->wired_count == 0) { in vm_map_wire_locked()
3617 entry->wired_count++; in vm_map_wire_locked()
3619 npages = atop(entry->end - entry->start); in vm_map_wire_locked()
3622 entry->start); in vm_map_wire_locked()
3623 end = entry->end; in vm_map_wire_locked()
3629 * Release the map lock, relying on the in-transition in vm_map_wire_locked()
3632 saved_start = entry->start; in vm_map_wire_locked()
3633 saved_end = entry->end; in vm_map_wire_locked()
3634 last_timestamp = map->timestamp; in vm_map_wire_locked()
3653 if (last_timestamp + 1 != map->timestamp) { in vm_map_wire_locked()
3666 for (entry = next_entry; entry->end < saved_end; in vm_map_wire_locked()
3675 faddr < entry->end) in vm_map_wire_locked()
3684 end = entry->end; in vm_map_wire_locked()
3688 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3689 entry->wired_count++; in vm_map_wire_locked()
3697 entry->end < end && next_entry->start > entry->end) { in vm_map_wire_locked()
3698 end = entry->end; in vm_map_wire_locked()
3715 for (; entry->start < end; in vm_map_wire_locked()
3730 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_wire_locked()
3731 entry->wiring_thread != curthread) { in vm_map_wire_locked()
3737 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { in vm_map_wire_locked()
3741 entry->eflags |= MAP_ENTRY_USER_WIRED; in vm_map_wire_locked()
3742 } else if (entry->wired_count == -1) { in vm_map_wire_locked()
3747 entry->wired_count = 0; in vm_map_wire_locked()
3749 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3754 if (entry->wired_count == 1) { in vm_map_wire_locked()
3758 atop(entry->end - entry->start)); in vm_map_wire_locked()
3760 entry->wired_count--; in vm_map_wire_locked()
3762 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_wire_locked()
3763 ("vm_map_wire: in-transition flag missing %p", entry)); in vm_map_wire_locked()
3764 KASSERT(entry->wiring_thread == curthread, in vm_map_wire_locked()
3766 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | in vm_map_wire_locked()
3768 entry->wiring_thread = NULL; in vm_map_wire_locked()
3769 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_wire_locked()
3770 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_wire_locked()
3793 * flushing the current region containing start.
3819 start = first_entry->start; in vm_map_sync()
3820 end = first_entry->end; in vm_map_sync()
3824 * Make a first pass to check for user-wired memory, holes, in vm_map_sync()
3827 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_sync()
3829 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { in vm_map_sync()
3835 ((start & (pagesizes[bdry_idx] - 1)) != 0 || in vm_map_sync()
3836 (end & (pagesizes[bdry_idx] - 1)) != 0)) { in vm_map_sync()
3842 if (end > entry->end && in vm_map_sync()
3843 entry->end != next_entry->start) { in vm_map_sync()
3850 pmap_remove(map->pmap, start, end); in vm_map_sync()
3857 for (entry = first_entry; entry->start < end;) { in vm_map_sync()
3858 offset = entry->offset + (start - entry->start); in vm_map_sync()
3859 size = (end <= entry->end ? end : entry->end) - start; in vm_map_sync()
3860 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_sync()
3865 smap = entry->object.sub_map; in vm_map_sync()
3868 tsize = tentry->end - offset; in vm_map_sync()
3871 object = tentry->object.vm_object; in vm_map_sync()
3872 offset = tentry->offset + (offset - tentry->start); in vm_map_sync()
3875 object = entry->object.vm_object; in vm_map_sync()
3878 last_timestamp = map->timestamp; in vm_map_sync()
3885 if (last_timestamp == map->timestamp || in vm_map_sync()
3908 KASSERT(entry->wired_count > 0, in vm_map_entry_unwire()
3911 size = entry->end - entry->start; in vm_map_entry_unwire()
3912 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) in vm_map_entry_unwire()
3914 pmap_unwire(map->pmap, entry->start, entry->end); in vm_map_entry_unwire()
3915 vm_object_unwire(entry->object.vm_object, entry->offset, size, in vm_map_entry_unwire()
3917 entry->wired_count = 0; in vm_map_entry_unwire()
3924 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) in vm_map_entry_deallocate()
3925 vm_object_deallocate(entry->object.vm_object); in vm_map_entry_deallocate()
3942 object = entry->object.vm_object; in vm_map_entry_delete()
3944 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_entry_delete()
3945 MPASS(entry->cred == NULL); in vm_map_entry_delete()
3946 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); in vm_map_entry_delete()
3948 vm_map_entry_deallocate(entry, map->system_map); in vm_map_entry_delete()
3952 size = entry->end - entry->start; in vm_map_entry_delete()
3953 map->size -= size; in vm_map_entry_delete()
3955 if (entry->cred != NULL) { in vm_map_entry_delete()
3956 swap_release_by_cred(size, entry->cred); in vm_map_entry_delete()
3957 crfree(entry->cred); in vm_map_entry_delete()
3960 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { in vm_map_entry_delete()
3961 entry->object.vm_object = NULL; in vm_map_entry_delete()
3962 } else if ((object->flags & OBJ_ANON) != 0 || in vm_map_entry_delete()
3964 KASSERT(entry->cred == NULL || object->cred == NULL || in vm_map_entry_delete()
3965 (entry->eflags & MAP_ENTRY_NEEDS_COPY), in vm_map_entry_delete()
3967 offidxstart = OFF_TO_IDX(entry->offset); in vm_map_entry_delete()
3970 if (object->ref_count != 1 && in vm_map_entry_delete()
3971 ((object->flags & OBJ_ONEMAPPING) != 0 || in vm_map_entry_delete()
3983 if (offidxend >= object->size && in vm_map_entry_delete()
3984 offidxstart < object->size) { in vm_map_entry_delete()
3985 size1 = object->size; in vm_map_entry_delete()
3986 object->size = offidxstart; in vm_map_entry_delete()
3987 if (object->cred != NULL) { in vm_map_entry_delete()
3988 size1 -= object->size; in vm_map_entry_delete()
3989 KASSERT(object->charge >= ptoa(size1), in vm_map_entry_delete()
3990 ("object %p charge < 0", object)); in vm_map_entry_delete()
3992 object->cred); in vm_map_entry_delete()
3993 object->charge -= ptoa(size1); in vm_map_entry_delete()
3999 if (map->system_map) in vm_map_entry_delete()
4002 entry->defer_next = curthread->td_map_def_user; in vm_map_entry_delete()
4003 curthread->td_map_def_user = entry; in vm_map_entry_delete()
4031 for (; entry->start < end; entry = next_entry) { in vm_map_delete()
4037 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || in vm_map_delete()
4043 saved_start = entry->start; in vm_map_delete()
4044 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_delete()
4045 last_timestamp = map->timestamp; in vm_map_delete()
4048 if (last_timestamp + 1 != map->timestamp) { in vm_map_delete()
4074 if (entry->wired_count != 0) in vm_map_delete()
4082 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in vm_map_delete()
4083 entry->object.vm_object != NULL) in vm_map_delete()
4084 pmap_map_delete(map->pmap, entry->start, entry->end); in vm_map_delete()
4144 if (start < entry->start) in vm_map_check_protection()
4149 if ((entry->protection & protection) != protection) in vm_map_check_protection()
4152 start = entry->end; in vm_map_check_protection()
4162 * Copies a swap-backed object from an existing map entry to a
4163 * new one. Carries forward the swap charge. May change the
4174 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4176 if ((src_object->flags & OBJ_ANON) != 0) { in vm_map_copy_swap_object()
4179 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { in vm_map_copy_swap_object()
4181 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4188 if (src_entry->cred != NULL && in vm_map_copy_swap_object()
4189 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4190 KASSERT(src_object->cred == NULL, in vm_map_copy_swap_object()
4193 src_object->cred = src_entry->cred; in vm_map_copy_swap_object()
4194 src_object->charge = size; in vm_map_copy_swap_object()
4196 dst_entry->object.vm_object = src_object; in vm_map_copy_swap_object()
4198 cred = curthread->td_ucred; in vm_map_copy_swap_object()
4200 dst_entry->cred = cred; in vm_map_copy_swap_object()
4202 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4204 src_entry->cred = cred; in vm_map_copy_swap_object()
4230 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) in vm_map_copy_entry()
4233 if (src_entry->wired_count == 0 || in vm_map_copy_entry()
4234 (src_entry->protection & VM_PROT_WRITE) == 0) { in vm_map_copy_entry()
4237 * write-protected. in vm_map_copy_entry()
4239 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && in vm_map_copy_entry()
4240 (src_entry->protection & VM_PROT_WRITE) != 0) { in vm_map_copy_entry()
4241 pmap_protect(src_map->pmap, in vm_map_copy_entry()
4242 src_entry->start, in vm_map_copy_entry()
4243 src_entry->end, in vm_map_copy_entry()
4244 src_entry->protection & ~VM_PROT_WRITE); in vm_map_copy_entry()
4250 size = src_entry->end - src_entry->start; in vm_map_copy_entry()
4251 if ((src_object = src_entry->object.vm_object) != NULL) { in vm_map_copy_entry()
4252 if ((src_object->flags & OBJ_SWAP) != 0) { in vm_map_copy_entry()
4256 src_object = src_entry->object.vm_object; in vm_map_copy_entry()
4259 dst_entry->object.vm_object = src_object; in vm_map_copy_entry()
4261 src_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4263 dst_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4265 dst_entry->offset = src_entry->offset; in vm_map_copy_entry()
4266 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { in vm_map_copy_entry()
4273 * decrement object->un_pager writecount in vm_map_copy_entry()
4278 fake_entry->eflags = MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4279 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4281 fake_entry->object.vm_object = src_object; in vm_map_copy_entry()
4282 fake_entry->start = src_entry->start; in vm_map_copy_entry()
4283 fake_entry->end = src_entry->end; in vm_map_copy_entry()
4284 fake_entry->defer_next = in vm_map_copy_entry()
4285 curthread->td_map_def_user; in vm_map_copy_entry()
4286 curthread->td_map_def_user = fake_entry; in vm_map_copy_entry()
4289 pmap_copy(dst_map->pmap, src_map->pmap, in vm_map_copy_entry()
4290 dst_entry->start, dst_entry->end - dst_entry->start, in vm_map_copy_entry()
4291 src_entry->start); in vm_map_copy_entry()
4293 dst_entry->object.vm_object = NULL; in vm_map_copy_entry()
4294 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_copy_entry()
4295 dst_entry->offset = 0; in vm_map_copy_entry()
4296 if (src_entry->cred != NULL) { in vm_map_copy_entry()
4297 dst_entry->cred = curthread->td_ucred; in vm_map_copy_entry()
4298 crhold(dst_entry->cred); in vm_map_copy_entry()
4304 * We don't want to make writeable wired pages copy-on-write. in vm_map_copy_entry()
4315 * Update the newly-forked vmspace each time a map entry is inherited
4317 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4326 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) in vmspace_map_entry_forked()
4328 entrysize = entry->end - entry->start; in vmspace_map_entry_forked()
4329 vm2->vm_map.size += entrysize; in vmspace_map_entry_forked()
4330 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { in vmspace_map_entry_forked()
4331 vm2->vm_ssize += btoc(entrysize); in vmspace_map_entry_forked()
4332 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && in vmspace_map_entry_forked()
4333 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { in vmspace_map_entry_forked()
4334 newend = MIN(entry->end, in vmspace_map_entry_forked()
4335 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); in vmspace_map_entry_forked()
4336 vm2->vm_dsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4337 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && in vmspace_map_entry_forked()
4338 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { in vmspace_map_entry_forked()
4339 newend = MIN(entry->end, in vmspace_map_entry_forked()
4340 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); in vmspace_map_entry_forked()
4341 vm2->vm_tsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4366 old_map = &vm1->vm_map; in vmspace_fork()
4373 vm2->vm_taddr = vm1->vm_taddr; in vmspace_fork()
4374 vm2->vm_daddr = vm1->vm_daddr; in vmspace_fork()
4375 vm2->vm_maxsaddr = vm1->vm_maxsaddr; in vmspace_fork()
4376 vm2->vm_stacktop = vm1->vm_stacktop; in vmspace_fork()
4377 vm2->vm_shp_base = vm1->vm_shp_base; in vmspace_fork()
4379 if (old_map->busy) in vmspace_fork()
4381 new_map = &vm2->vm_map; in vmspace_fork()
4385 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); in vmspace_fork()
4387 sx_xunlock(&old_map->lock); in vmspace_fork()
4388 sx_xunlock(&new_map->lock); in vmspace_fork()
4394 new_map->anon_loc = old_map->anon_loc; in vmspace_fork()
4395 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | in vmspace_fork()
4399 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vmspace_fork()
4402 inh = old_entry->inheritance; in vmspace_fork()
4403 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && in vmspace_fork()
4416 object = old_entry->object.vm_object; in vmspace_fork()
4419 object = old_entry->object.vm_object; in vmspace_fork()
4427 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vmspace_fork()
4428 vm_object_shadow(&old_entry->object.vm_object, in vmspace_fork()
4429 &old_entry->offset, in vmspace_fork()
4430 old_entry->end - old_entry->start, in vmspace_fork()
4431 old_entry->cred, in vmspace_fork()
4434 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vmspace_fork()
4435 old_entry->cred = NULL; in vmspace_fork()
4443 object = old_entry->object.vm_object; in vmspace_fork()
4447 if (old_entry->cred != NULL) { in vmspace_fork()
4448 KASSERT(object->cred == NULL, in vmspace_fork()
4450 object->cred = old_entry->cred; in vmspace_fork()
4451 object->charge = old_entry->end - in vmspace_fork()
4452 old_entry->start; in vmspace_fork()
4453 old_entry->cred = NULL; in vmspace_fork()
4462 if (old_entry->eflags & MAP_ENTRY_WRITECNT && in vmspace_fork()
4463 object->type == OBJT_VNODE) { in vmspace_fork()
4464 KASSERT(((struct vnode *)object-> in vmspace_fork()
4465 handle)->v_writecount > 0, in vmspace_fork()
4468 KASSERT(object->un_pager.vnp. in vmspace_fork()
4481 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4483 new_entry->wiring_thread = NULL; in vmspace_fork()
4484 new_entry->wired_count = 0; in vmspace_fork()
4485 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { in vmspace_fork()
4487 new_entry->start, new_entry->end); in vmspace_fork()
4492 * Insert the entry into the new map -- we know we're in vmspace_fork()
4501 pmap_copy(new_map->pmap, old_map->pmap, in vmspace_fork()
4502 new_entry->start, in vmspace_fork()
4503 (old_entry->end - old_entry->start), in vmspace_fork()
4504 old_entry->start); in vmspace_fork()
4516 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4518 new_entry->wiring_thread = NULL; in vmspace_fork()
4519 new_entry->wired_count = 0; in vmspace_fork()
4520 new_entry->object.vm_object = NULL; in vmspace_fork()
4521 new_entry->cred = NULL; in vmspace_fork()
4537 new_entry->start = old_entry->start; in vmspace_fork()
4538 new_entry->end = old_entry->end; in vmspace_fork()
4539 new_entry->eflags = old_entry->eflags & in vmspace_fork()
4543 new_entry->protection = old_entry->protection; in vmspace_fork()
4544 new_entry->max_protection = old_entry->max_protection; in vmspace_fork()
4545 new_entry->inheritance = VM_INHERIT_ZERO; in vmspace_fork()
4550 new_entry->cred = curthread->td_ucred; in vmspace_fork()
4551 crhold(new_entry->cred); in vmspace_fork()
4552 *fork_charge += (new_entry->end - new_entry->start); in vmspace_fork()
4562 sx_xunlock(&old_map->lock); in vmspace_fork()
4563 sx_xunlock(&new_map->lock); in vmspace_fork()
4581 MPASS((map->flags & MAP_WIREFUTURE) == 0); in vm_map_stack()
4586 /* If we would blow our VMEM resource limit, no go */ in vm_map_stack()
4587 if (map->size + init_ssize > vmemlim) { in vm_map_stack()
4620 ("bi-dir stack")); in vm_map_stack_locked()
4625 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_stack_locked()
4626 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_stack_locked()
4633 init_ssize = max_ssize - sgp; in vm_map_stack_locked()
4640 * If we can't accommodate max_ssize in the current mapping, no go. in vm_map_stack_locked()
4642 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) in vm_map_stack_locked()
4656 bot = addrbos + max_ssize - init_ssize; in vm_map_stack_locked()
4670 KASSERT(new_entry->end == top || new_entry->start == bot, in vm_map_stack_locked()
4673 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, in vm_map_stack_locked()
4676 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, in vm_map_stack_locked()
4684 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, in vm_map_stack_locked()
4685 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); in vm_map_stack_locked()
4686 KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_stack_locked()
4689 gap_entry->eflags)); in vm_map_stack_locked()
4693 * read-ahead logic is never used for it. Re-use in vm_map_stack_locked()
4700 gap_entry->next_read = sgp; in vm_map_stack_locked()
4701 gap_entry->offset = prot | PROT_MAX(max); in vm_map_stack_locked()
4726 uint64_t limit; in vm_map_growstack() local
4733 vm = p->p_vmspace; in vm_map_growstack()
4740 if (p != initproc && (map != &p->p_vmspace->vm_map || in vm_map_growstack()
4741 p->p_textvp == NULL)) in vm_map_growstack()
4744 MPASS(!map->system_map); in vm_map_growstack()
4753 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_growstack()
4755 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { in vm_map_growstack()
4757 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || in vm_map_growstack()
4758 stack_entry->start != gap_entry->end) in vm_map_growstack()
4760 grow_amount = round_page(stack_entry->start - addr); in vm_map_growstack()
4762 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { in vm_map_growstack()
4764 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || in vm_map_growstack()
4765 stack_entry->end != gap_entry->start) in vm_map_growstack()
4767 grow_amount = round_page(addr + 1 - stack_entry->end); in vm_map_growstack()
4772 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_growstack()
4773 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_growstack()
4774 gap_entry->next_read; in vm_map_growstack()
4775 max_grow = gap_entry->end - gap_entry->start; in vm_map_growstack()
4778 max_grow -= guard; in vm_map_growstack()
4784 * limit. in vm_map_growstack()
4786 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && in vm_map_growstack()
4787 addr < (vm_offset_t)vm->vm_stacktop; in vm_map_growstack()
4788 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) in vm_map_growstack()
4795 ctob(vm->vm_ssize) + grow_amount)) { in vm_map_growstack()
4806 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { in vm_map_growstack()
4807 grow_amount = trunc_page((vm_size_t)stacklim) - in vm_map_growstack()
4808 ctob(vm->vm_ssize); in vm_map_growstack()
4813 limit = racct_get_available(p, RACCT_STACK); in vm_map_growstack()
4815 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) in vm_map_growstack()
4816 grow_amount = limit - ctob(vm->vm_ssize); in vm_map_growstack()
4819 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4820 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { in vm_map_growstack()
4828 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { in vm_map_growstack()
4838 /* If we would blow our VMEM resource limit, no go */ in vm_map_growstack()
4839 if (map->size + grow_amount > vmemlim) { in vm_map_growstack()
4846 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { in vm_map_growstack()
4866 prot = PROT_EXTRACT(gap_entry->offset); in vm_map_growstack()
4867 max = PROT_MAX_EXTRACT(gap_entry->offset); in vm_map_growstack()
4868 sgp = gap_entry->next_read; in vm_map_growstack()
4870 grow_start = gap_entry->end - grow_amount; in vm_map_growstack()
4871 if (gap_entry->start + grow_amount == gap_entry->end) { in vm_map_growstack()
4872 gap_start = gap_entry->start; in vm_map_growstack()
4873 gap_end = gap_entry->end; in vm_map_growstack()
4877 MPASS(gap_entry->start < gap_entry->end - grow_amount); in vm_map_growstack()
4878 vm_map_entry_resize(map, gap_entry, -grow_amount); in vm_map_growstack()
4890 gap_entry->next_read = sgp; in vm_map_growstack()
4891 gap_entry->offset = prot | PROT_MAX(max); in vm_map_growstack()
4897 grow_start = stack_entry->end; in vm_map_growstack()
4898 cred = stack_entry->cred; in vm_map_growstack()
4899 if (cred == NULL && stack_entry->object.vm_object != NULL) in vm_map_growstack()
4900 cred = stack_entry->object.vm_object->cred; in vm_map_growstack()
4904 else if (stack_entry->object.vm_object == NULL || in vm_map_growstack()
4905 vm_object_coalesce(stack_entry->object.vm_object, in vm_map_growstack()
4906 stack_entry->offset, in vm_map_growstack()
4907 (vm_size_t)(stack_entry->end - stack_entry->start), in vm_map_growstack()
4909 if (gap_entry->start + grow_amount == gap_entry->end) { in vm_map_growstack()
4914 gap_entry->start += grow_amount; in vm_map_growstack()
4915 stack_entry->end += grow_amount; in vm_map_growstack()
4917 map->size += grow_amount; in vm_map_growstack()
4923 vm->vm_ssize += btoc(grow_amount); in vm_map_growstack()
4928 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4939 error = racct_set(p, RACCT_VMEM, map->size); in vm_map_growstack()
4943 ptoa(pmap_wired_count(map->pmap))); in vm_map_growstack()
4946 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); in vm_map_growstack()
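
vm_map_growstack() clamps the requested growth against the process resource limits before resizing the gap entry and extending the stack entry: stacklim, vmemlim, and lmemlim come from RLIMIT_STACK, RLIMIT_VMEM, and RLIMIT_MEMLOCK, with RACCT accounting applied on top. The first of these is what a process sees through getrlimit(2). A minimal sketch of the same clamping arithmetic, with hypothetical sizes standing in for ctob(vm_ssize) and grow_amount:

/* Hedged sketch: mirror the stacklim clamp from vm_map_growstack() above. */
#include <sys/types.h>
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    struct rlimit rl;
    size_t cur_ssize = 4UL * 1024 * 1024;    /* hypothetical current stack size */
    size_t grow_amount = 2UL * 1024 * 1024;  /* hypothetical requested growth */

    if (getrlimit(RLIMIT_STACK, &rl) != 0)
        return (1);
    /* Same idea as: if (ctob(vm_ssize) + grow_amount > stacklim) clamp. */
    if (cur_ssize + grow_amount > rl.rlim_cur)
        grow_amount = rl.rlim_cur > cur_ssize ? rl.rlim_cur - cur_ssize : 0;
    printf("stack limit %ju bytes, would grow by %zu bytes\n",
        (uintmax_t)rl.rlim_cur, grow_amount);
    return (0);
}
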
4962 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_exec()
4965 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, in vmspace_exec()
4970 newvmspace->vm_swrss = oldvmspace->vm_swrss; in vmspace_exec()
4979 p->p_vmspace = newvmspace; in vmspace_exec()
4981 if (p == curthread->td_proc) in vmspace_exec()
4983 curthread->td_pflags |= TDP_EXECVMSPC; in vmspace_exec()
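
vmspace_exec() builds a fresh vmspace for the new image, carries vm_swrss forward, installs it as p->p_vmspace, and flags the pending switch with TDP_EXECVMSPC. The userspace-visible trigger is simply execve(2); with vfork(2) the child borrows the parent's vmspace until the exec replaces it. A minimal sketch, assuming /bin/echo exists on the system:

/*
 * Hedged sketch: vfork(2) shares the parent's address space; execl()
 * then gives the child its own vmspace (the kernel-side counterpart is
 * vmspace_exec() in the lines above).
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    pid_t pid = vfork();

    if (pid == 0) {
        execl("/bin/echo", "echo", "child now has its own vmspace", (char *)NULL);
        _exit(127);     /* only reached if execl() fails */
    }
    waitpid(pid, NULL, 0);
    return (0);
}
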
4994 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_unshare()
5000 * cannot concurrently transition 1 -> 2. in vmspace_unshare()
5002 if (refcount_load(&oldvmspace->vm_refcnt) == 1) in vmspace_unshare()
5008 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { in vmspace_unshare()
5013 p->p_vmspace = newvmspace; in vmspace_unshare()
5015 if (p == curthread->td_proc) in vmspace_unshare()
5036 * vm_map_lookup_done, to make that fast.
5079 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_lookup()
5082 *var_map = map = entry->object.sub_map; in vm_map_lookup()
5090 prot = entry->protection; in vm_map_lookup()
5094 (entry->eflags & MAP_ENTRY_GUARD) != 0 && in vm_map_lookup()
5095 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_lookup()
5105 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & in vm_map_lookup()
5108 ("entry %p flags %x", entry, entry->eflags)); in vm_map_lookup()
5110 (entry->max_protection & VM_PROT_WRITE) == 0 && in vm_map_lookup()
5111 (entry->eflags & MAP_ENTRY_COW) == 0) { in vm_map_lookup()
5120 *wired = (entry->wired_count != 0); in vm_map_lookup()
5122 fault_type = entry->protection; in vm_map_lookup()
5123 size = entry->end - entry->start; in vm_map_lookup()
5126 * If the entry was copy-on-write, we either ... in vm_map_lookup()
5128 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup()
5141 * -- one just moved from the map to the new in vm_map_lookup()
5147 if (entry->cred == NULL) { in vm_map_lookup()
5152 cred = curthread->td_ucred; in vm_map_lookup()
5159 entry->cred = cred; in vm_map_lookup()
5161 eobject = entry->object.vm_object; in vm_map_lookup()
5162 vm_object_shadow(&entry->object.vm_object, in vm_map_lookup()
5163 &entry->offset, size, entry->cred, false); in vm_map_lookup()
5164 if (eobject == entry->object.vm_object) { in vm_map_lookup()
5168 swap_release_by_cred(size, entry->cred); in vm_map_lookup()
5169 crfree(entry->cred); in vm_map_lookup()
5171 entry->cred = NULL; in vm_map_lookup()
5172 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vm_map_lookup()
5177 * We're attempting to read a copy-on-write page -- in vm_map_lookup()
5187 if (entry->object.vm_object == NULL && !map->system_map) { in vm_map_lookup()
5190 entry->object.vm_object = vm_object_allocate_anon(atop(size), in vm_map_lookup()
5191 NULL, entry->cred, size); in vm_map_lookup()
5192 entry->offset = 0; in vm_map_lookup()
5193 entry->cred = NULL; in vm_map_lookup()
5199 * copy-on-write or empty, it has been fixed up. in vm_map_lookup()
5201 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup()
5202 *object = entry->object.vm_object; in vm_map_lookup()
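
vm_map_lookup() resolves a fault address to an object and page index. For a write fault on a MAP_ENTRY_NEEDS_COPY entry it charges the current credential for swap, shadows the backing object, and clears the flag; for an entry with no backing object it allocates an anonymous object on the spot. The userspace-visible face of this machinery is ordinary MAP_PRIVATE copy-on-write: writes land in private pages while the underlying file is untouched. A minimal sketch, using a throwaway temp file (the /tmp path template is an illustrative choice, not from the source):

/* Hedged sketch: private mapping writes trigger the COW/shadow path above. */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    char path[] = "/tmp/cow.XXXXXX";
    char buf[8];
    char *p;
    int fd;

    if ((fd = mkstemp(path)) < 0)
        return (1);
    (void)write(fd, "original", 8);
    p = mmap(NULL, 8, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED)
        return (1);
    memcpy(p, "modified", 8);           /* write fault: COW gives us private pages */
    (void)pread(fd, buf, 8, 0);
    printf("mapping: %.8s, file: %.8s\n", p, buf);  /* "modified" vs "original" */
    munmap(p, 8);
    close(fd);
    unlink(path);
    return (0);
}
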
5240 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) in vm_map_lookup_locked()
5246 prot = entry->protection; in vm_map_lookup_locked()
5255 *wired = (entry->wired_count != 0); in vm_map_lookup_locked()
5257 fault_type = entry->protection; in vm_map_lookup_locked()
5259 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup_locked()
5261 * Fail if the entry was copy-on-write for a write fault. in vm_map_lookup_locked()
5266 * We're attempting to read a copy-on-write page -- in vm_map_lookup_locked()
5275 if (entry->object.vm_object == NULL && !map->system_map) in vm_map_lookup_locked()
5280 * copy-on-write or empty, it has been fixed up. in vm_map_lookup_locked()
5282 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup_locked()
5283 *object = entry->object.vm_object; in vm_map_lookup_locked()
5299 * Unlock the main-level map in vm_map_lookup_done()
5322 return (map->pmap); in vm_map_pmap_KBI()
5341 ++map->nupdates; in _vm_map_assert_consistent()
5346 header = prev = &map->header; in _vm_map_assert_consistent()
5348 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5349 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5350 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5351 KASSERT(entry->start < entry->end, in _vm_map_assert_consistent()
5353 (uintmax_t)entry->start, (uintmax_t)entry->end)); in _vm_map_assert_consistent()
5354 KASSERT(entry->left == header || in _vm_map_assert_consistent()
5355 entry->left->start < entry->start, in _vm_map_assert_consistent()
5356 ("map %p left->start = %jx, start = %jx", map, in _vm_map_assert_consistent()
5357 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5358 KASSERT(entry->right == header || in _vm_map_assert_consistent()
5359 entry->start < entry->right->start, in _vm_map_assert_consistent()
5360 ("map %p start = %jx, right->start = %jx", map, in _vm_map_assert_consistent()
5361 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); in _vm_map_assert_consistent()
5362 cur = map->root; in _vm_map_assert_consistent()
5365 if (entry->start < cur->start) { in _vm_map_assert_consistent()
5367 cur = cur->left; in _vm_map_assert_consistent()
5370 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5371 } else if (cur->end <= entry->start) { in _vm_map_assert_consistent()
5373 cur = cur->right; in _vm_map_assert_consistent()
5376 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5380 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5386 KASSERT(entry->max_free == vm_size_max(max_left, max_right), in _vm_map_assert_consistent()
5388 (uintmax_t)entry->max_free, in _vm_map_assert_consistent()
5392 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5393 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5394 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
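
_vm_map_assert_consistent() walks every entry in diagnostic kernels and asserts the map's structural invariants: entries are address-sorted and disjoint (prev->end <= entry->start), every range is non-empty, the search tree is ordered by start address and reaches each entry from the root, and max_free matches the subtree maxima. The sketch below restates the first few of those checks over a simplified stand-in type; struct entry and check_entries are illustrative names, not the kernel's vm_map_entry:

/* Hedged sketch of the list/tree ordering invariants asserted above. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
    unsigned long start, end;
    struct entry *left, *right;   /* binary search tree keyed on start */
    struct entry *next;           /* address-ordered list */
};

static void
check_entries(const struct entry *e)
{
    unsigned long prev_end = 0;

    for (; e != NULL; prev_end = e->end, e = e->next) {
        assert(prev_end <= e->start);   /* entries are sorted and disjoint */
        assert(e->start < e->end);      /* every range is non-empty */
        if (e->left != NULL)
            assert(e->left->start < e->start);
        if (e->right != NULL)
            assert(e->start < e->right->start);
    }
}

int
main(void)
{
    struct entry b = { 0x3000, 0x4000, NULL, NULL, NULL };
    struct entry a = { 0x1000, 0x2000, NULL, &b, &b };

    check_entries(&a);
    printf("invariants hold\n");
    return (0);
}
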
5411 (void *)map->pmap, map->nentries, map->timestamp); in vm_map_print()
5414 prev = &map->header; in vm_map_print()
5417 (void *)entry, (void *)entry->start, (void *)entry->end, in vm_map_print()
5418 entry->eflags); in vm_map_print()
5424 entry->protection, in vm_map_print()
5425 entry->max_protection, in vm_map_print()
5427 entry->inheritance]); in vm_map_print()
5428 if (entry->wired_count != 0) in vm_map_print()
5431 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_print()
5433 (void *)entry->object.sub_map, in vm_map_print()
5434 (uintmax_t)entry->offset); in vm_map_print()
5435 if (prev == &map->header || in vm_map_print()
5436 prev->object.sub_map != in vm_map_print()
5437 entry->object.sub_map) { in vm_map_print()
5439 vm_map_print((vm_map_t)entry->object.sub_map); in vm_map_print()
5440 db_indent -= 2; in vm_map_print()
5443 if (entry->cred != NULL) in vm_map_print()
5444 db_printf(", ruid %d", entry->cred->cr_ruid); in vm_map_print()
5446 (void *)entry->object.vm_object, in vm_map_print()
5447 (uintmax_t)entry->offset); in vm_map_print()
5448 if (entry->object.vm_object && entry->object.vm_object->cred) in vm_map_print()
5449 db_printf(", obj ruid %d charge %jx", in vm_map_print()
5450 entry->object.vm_object->cred->cr_ruid, in vm_map_print()
5451 (uintmax_t)entry->object.vm_object->charge); in vm_map_print()
5452 if (entry->eflags & MAP_ENTRY_COW) in vm_map_print()
5454 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); in vm_map_print()
5457 if (prev == &map->header || in vm_map_print()
5458 prev->object.vm_object != in vm_map_print()
5459 entry->object.vm_object) { in vm_map_print()
5462 entry->object.vm_object, in vm_map_print()
5464 db_indent -= 2; in vm_map_print()
5469 db_indent -= 2; in vm_map_print()
5493 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, in DB_SHOW_COMMAND()
5494 (void *)vmspace_pmap(p->p_vmspace)); in DB_SHOW_COMMAND()
5496 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); in DB_SHOW_COMMAND()