Lines Matching +full:entry +full:- +full:address

1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
107 * Maps consist of an ordered doubly-linked list of simple
108 * entries; a self-adjusting binary search tree of these
114 * [That is, an entry is split into two, bordering at a
121 * another, and then marking both regions as copy-on-write.
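The layered structure described above (an ordered entry list plus a self-adjusting search tree over the same nodes) means an in-order walk of a map is just a list traversal. A minimal sketch of such a walk, assuming the VM_MAP_ENTRY_FOREACH() iterator and the vm_map_lock_read()/vm_map_unlock_read() macros from vm_map.h; the function name is illustrative only:

static int
example_count_entries(vm_map_t map)
{
        vm_map_entry_t entry;
        int n;

        /* Hold at least a read lock while walking the entry list. */
        n = 0;
        vm_map_lock_read(map);
        VM_MAP_ENTRY_FOREACH(entry, map)
                n++;
        vm_map_unlock_read(map);
        return (n);
}

In practice the map already caches this count in map->nentries; the loop only illustrates the traversal order.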
131 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
132 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
133 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
144 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
149 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
150 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
151 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
181 * submap. This function allocates a virtual address range directly from the
238 * The worst-case upper bound on the number of kernel map entries that may be
251 * User map and entry structures are allocated from the general purpose
262 * Disable the use of per-CPU buckets: map entry allocation is in vm_map_startup()
265 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), in vm_map_startup()
269 /* Reserve an extra map entry for use when replenishing the reserve. */ in vm_map_startup()
276 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), in vm_map_startup()
294 map = &vm->vm_map; in vmspace_zinit()
297 sx_init(&map->lock, "vm map (user)"); in vmspace_zinit()
309 KASSERT(vm->vm_map.nentries == 0, in vmspace_zdtor()
310 ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); in vmspace_zdtor()
311 KASSERT(vm->vm_map.size == 0, in vmspace_zdtor()
312 ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); in vmspace_zdtor()
326 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL")); in vmspace_alloc()
332 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); in vmspace_alloc()
333 refcount_init(&vm->vm_refcnt, 1); in vmspace_alloc()
334 vm->vm_shm = NULL; in vmspace_alloc()
335 vm->vm_swrss = 0; in vmspace_alloc()
336 vm->vm_tsize = 0; in vmspace_alloc()
337 vm->vm_dsize = 0; in vmspace_alloc()
338 vm->vm_ssize = 0; in vmspace_alloc()
339 vm->vm_taddr = 0; in vmspace_alloc()
340 vm->vm_daddr = 0; in vmspace_alloc()
341 vm->vm_maxsaddr = 0; in vmspace_alloc()
377 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), in vmspace_dofree()
378 vm_map_max(&vm->vm_map)); in vmspace_dofree()
381 vm->vm_map.pmap = NULL; in vmspace_dofree()
392 if (refcount_release(&vm->vm_refcnt)) in vmspace_free()
402 vm = p->p_vmspace; in vmspace_exitfree()
403 p->p_vmspace = NULL; in vmspace_exitfree()
416 p = td->td_proc; in vmspace_exit()
417 vm = p->p_vmspace; in vmspace_exit()
427 if (!(released = refcount_release_if_last(&vm->vm_refcnt))) { in vmspace_exit()
428 if (p->p_vmspace != &vmspace0) { in vmspace_exit()
430 p->p_vmspace = &vmspace0; in vmspace_exit()
434 released = refcount_release(&vm->vm_refcnt); in vmspace_exit()
441 if (p->p_vmspace != vm) { in vmspace_exit()
443 p->p_vmspace = vm; in vmspace_exit()
449 p->p_vmspace = &vmspace0; in vmspace_exit()
468 vm = p->p_vmspace; in vmspace_acquire_ref()
469 if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) { in vmspace_acquire_ref()
473 if (vm != p->p_vmspace) { in vmspace_acquire_ref()
492 * a result, the 'newvm' vmspace always has a non-zero reference
504 KASSERT(refcount_load(&newvm->vm_refcnt) > 0, in vmspace_switch_aio()
507 oldvm = curproc->p_vmspace; in vmspace_switch_aio()
512 * Point to the new address space and refer to it. in vmspace_switch_aio()
514 curproc->p_vmspace = newvm; in vmspace_switch_aio()
515 refcount_acquire(&newvm->vm_refcnt); in vmspace_switch_aio()
528 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock()
530 sx_xlock_(&map->lock, file, line); in _vm_map_lock()
531 map->timestamp++; in _vm_map_lock()
535 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add) in vm_map_entry_set_vnode_text() argument
541 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0) in vm_map_entry_set_vnode_text()
543 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_set_vnode_text()
545 object = entry->object.vm_object; in vm_map_entry_set_vnode_text()
546 KASSERT(object != NULL, ("No object for text, entry %p", entry)); in vm_map_entry_set_vnode_text()
547 if ((object->flags & OBJ_ANON) != 0) in vm_map_entry_set_vnode_text()
548 object = object->handle; in vm_map_entry_set_vnode_text()
550 KASSERT(object->backing_object == NULL, in vm_map_entry_set_vnode_text()
551 ("non-anon object %p shadows", object)); in vm_map_entry_set_vnode_text()
552 KASSERT(object != NULL, ("No content object for text, entry %p obj %p", in vm_map_entry_set_vnode_text()
553 entry, entry->object.vm_object)); in vm_map_entry_set_vnode_text()
557 * referenced by the entry we are processing, so it cannot go in vm_map_entry_set_vnode_text()
584 vm_map_entry_t entry, next; in vm_map_process_deferred() local
588 entry = td->td_map_def_user; in vm_map_process_deferred()
589 td->td_map_def_user = NULL; in vm_map_process_deferred()
590 while (entry != NULL) { in vm_map_process_deferred()
591 next = entry->defer_next; in vm_map_process_deferred()
592 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | in vm_map_process_deferred()
595 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) { in vm_map_process_deferred()
600 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_process_deferred()
602 object = entry->object.vm_object; in vm_map_process_deferred()
604 vm_pager_release_writecount(object, entry->start, in vm_map_process_deferred()
605 entry->end); in vm_map_process_deferred()
607 vm_map_entry_set_vnode_text(entry, false); in vm_map_process_deferred()
608 vm_map_entry_deallocate(entry, FALSE); in vm_map_process_deferred()
609 entry = next; in vm_map_process_deferred()
619 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_assert_locked()
621 sx_assert_(&map->lock, SA_XLOCKED, file, line); in _vm_map_assert_locked()
642 if (map->nupdates > map->nentries) { \
644 map->nupdates = 0; \
663 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) { in _vm_map_unlock()
665 map->flags &= ~MAP_REPLENISH; in _vm_map_unlock()
668 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock()
670 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock()
680 mtx_lock_flags_(&map->system_mtx, 0, file, line); in _vm_map_lock_read()
682 sx_slock_(&map->lock, file, line); in _vm_map_lock_read()
690 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_read()
692 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_read()
694 sx_sunlock_(&map->lock, file, line); in _vm_map_unlock_read()
705 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock()
706 !sx_try_xlock_(&map->lock, file, line); in _vm_map_trylock()
708 map->timestamp++; in _vm_map_trylock()
718 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : in _vm_map_trylock_read()
719 !sx_try_slock_(&map->lock, file, line); in _vm_map_trylock_read()
728 * non-zero value if the upgrade fails. If the upgrade fails, the map is
739 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_upgrade()
741 if (!sx_try_upgrade_(&map->lock, file, line)) { in _vm_map_lock_upgrade()
742 last_timestamp = map->timestamp; in _vm_map_lock_upgrade()
743 sx_sunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
749 sx_xlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
750 if (last_timestamp != map->timestamp) { in _vm_map_lock_upgrade()
751 sx_xunlock_(&map->lock, file, line); in _vm_map_lock_upgrade()
756 map->timestamp++; in _vm_map_lock_upgrade()
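A hedged sketch of the upgrade contract noted above: vm_map_lock_upgrade() returns zero with the write lock held, and a non-zero value with the map completely unlocked, so the caller must relock and assume the map changed (the timestamp check above is what detects such changes). The function name is illustrative only:

static void
example_upgrade_and_modify(vm_map_t map)
{
        vm_map_lock_read(map);
        /* ... read-only inspection of the map ... */
        if (vm_map_lock_upgrade(map) != 0) {
                /* Upgrade failed and the map is now unlocked. */
                vm_map_lock(map);
                /* Re-validate any cached entries before modifying. */
        }
        /* The map is write-locked on both paths. */
        vm_map_unlock(map);
}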
765 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_lock_downgrade()
767 mtx_assert_(&map->system_mtx, MA_OWNED, file, line); in _vm_map_lock_downgrade()
770 sx_downgrade_(&map->lock, file, line); in _vm_map_lock_downgrade()
777 * Returns a non-zero value if the caller holds a write (exclusive) lock
785 return (mtx_owned(&map->system_mtx)); in vm_map_locked()
786 return (sx_xlocked(&map->lock)); in vm_map_locked()
810 KASSERT((map->flags & MAP_REPLENISH) == 0, in _vm_map_unlock_and_wait()
812 mtx_unlock_flags_(&map->system_mtx, 0, file, line); in _vm_map_unlock_and_wait()
814 sx_xunlock_(&map->lock, file, line); in _vm_map_unlock_and_wait()
816 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", in _vm_map_unlock_and_wait()
837 wakeup(&map->root); in vm_map_wakeup()
845 map->busy++; in vm_map_busy()
853 KASSERT(map->busy, ("vm_map_unbusy: not busy")); in vm_map_unbusy()
854 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { in vm_map_unbusy()
856 wakeup(&map->busy); in vm_map_unbusy()
865 while (map->busy) { in vm_map_wait_busy()
868 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); in vm_map_wait_busy()
870 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); in vm_map_wait_busy()
872 map->timestamp++; in vm_map_wait_busy()
889 map->header.eflags = MAP_ENTRY_HEADER; in _vm_map_init()
890 map->pmap = pmap; in _vm_map_init()
891 map->header.end = min; in _vm_map_init()
892 map->header.start = max; in _vm_map_init()
893 map->flags = 0; in _vm_map_init()
894 map->header.left = map->header.right = &map->header; in _vm_map_init()
895 map->root = NULL; in _vm_map_init()
896 map->timestamp = 0; in _vm_map_init()
897 map->busy = 0; in _vm_map_init()
898 map->anon_loc = 0; in _vm_map_init()
900 map->nupdates = 0; in _vm_map_init()
908 sx_init(&map->lock, "vm map (user)"); in vm_map_init()
916 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | in vm_map_init_system()
926 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_dispose() argument
928 uma_zfree(vm_map_is_system(map) ? kmapentzone : mapentzone, entry); in vm_map_entry_dispose()
934 * Allocates a VM map entry for insertion.
935 * No entry fields are filled in.
950 * map entry, dipping into the reserve if necessary, and set a in vm_map_entry_create()
958 kernel_map->flags |= MAP_REPLENISH; in vm_map_entry_create()
979 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) in vm_map_entry_set_behavior() argument
981 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | in vm_map_entry_set_behavior()
996 return (root->left != left_ancestor ? in vm_map_entry_max_free_left()
997 root->left->max_free : root->start - left_ancestor->end); in vm_map_entry_max_free_left()
1004 return (root->right != right_ancestor ? in vm_map_entry_max_free_right()
1005 root->right->max_free : right_ancestor->start - root->end); in vm_map_entry_max_free_right()
1011 * Find the {predecessor, successor} of the entry by taking one step
1016 vm_map_entry_pred(vm_map_entry_t entry) in vm_map_entry_pred() argument
1020 prior = entry->left; in vm_map_entry_pred()
1021 if (prior->right->start < entry->start) { in vm_map_entry_pred()
1023 prior = prior->right; in vm_map_entry_pred()
1024 while (prior->right != entry); in vm_map_entry_pred()
1041 * Infer root->right->max_free == root->max_free when \
1042 * y->max_free < root->max_free || root->max_free == 0. \
1045 y = root->left; \
1046 max_free = root->max_free; \
1051 if (max_free - 1 < vm_map_entry_max_free_left(root, llist)) \
1055 z = y->right; \
1057 root->left = z; \
1058 y->right = root; \
1059 if (max_free < y->max_free) \
1060 root->max_free = max_free = \
1061 vm_size_max(max_free, z->max_free); \
1062 } else if (max_free < y->max_free) \
1063 root->max_free = max_free = \
1064 vm_size_max(max_free, root->start - y->end);\
1066 y = root->left; \
1068 /* Copy right->max_free. Put root on rlist. */ \
1069 root->max_free = max_free; \
1072 root->left = rlist; \
1082 * Infer root->left->max_free == root->max_free when \
1083 * y->max_free < root->max_free || root->max_free == 0. \
1086 y = root->right; \
1087 max_free = root->max_free; \
1092 if (max_free - 1 < vm_map_entry_max_free_right(root, rlist)) \
1096 z = y->left; \
1098 root->right = z; \
1099 y->left = root; \
1100 if (max_free < y->max_free) \
1101 root->max_free = max_free = \
1102 vm_size_max(max_free, z->max_free); \
1103 } else if (max_free < y->max_free) \
1104 root->max_free = max_free = \
1105 vm_size_max(max_free, y->start - root->end);\
1107 y = root->right; \
1109 /* Copy left->max_free. Put root on llist. */ \
1110 root->max_free = max_free; \
1113 root->right = llist; \
1121 * subtrees with root->max_free < length as empty trees. llist and rlist are
1122 * the two sides in reverse order (bottom-up), with llist linked by the right
1124 * lists terminated by &map->header. This function, and the subsequent call to
1125 * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
1126 * values in &map->header.
1134 left = right = &map->header; in vm_map_splay_split()
1135 root = map->root; in vm_map_splay_split()
1136 while (root != NULL && root->max_free >= length) { in vm_map_splay_split()
1137 KASSERT(left->end <= root->start && in vm_map_splay_split()
1138 root->end <= right->start, in vm_map_splay_split()
1140 if (addr < root->start) { in vm_map_splay_split()
1142 y->max_free >= length && addr < y->start); in vm_map_splay_split()
1143 } else if (addr >= root->end) { in vm_map_splay_split()
1145 y->max_free >= length && addr >= y->end); in vm_map_splay_split()
1160 hi = root->right == right ? NULL : root->right; in vm_map_splay_findnext()
1175 lo = root->left == left ? NULL : root->left; in vm_map_splay_findprev()
1205 * llist->max_free and max_free. Update with the in vm_map_splay_merge_left_walk()
1208 llist->max_free = max_free = in vm_map_splay_merge_left_walk()
1209 vm_size_max(llist->max_free, max_free); in vm_map_splay_merge_left_walk()
1210 vm_map_entry_swap(&llist->right, &tail); in vm_map_splay_merge_left_walk()
1213 root->left = tail; in vm_map_splay_merge_left_walk()
1226 max_free = root->start - llist->end; in vm_map_splay_merge_pred()
1231 root->left = header; in vm_map_splay_merge_pred()
1232 header->right = root; in vm_map_splay_merge_pred()
1249 root->left == llist ? root : root->left, in vm_map_splay_merge_left()
1262 * rlist->max_free and max_free. Update with the in vm_map_splay_merge_right_walk()
1265 rlist->max_free = max_free = in vm_map_splay_merge_right_walk()
1266 vm_size_max(rlist->max_free, max_free); in vm_map_splay_merge_right_walk()
1267 vm_map_entry_swap(&rlist->left, &tail); in vm_map_splay_merge_right_walk()
1270 root->right = tail; in vm_map_splay_merge_right_walk()
1283 max_free = rlist->start - root->end; in vm_map_splay_merge_succ()
1288 root->right = header; in vm_map_splay_merge_succ()
1289 header->left = root; in vm_map_splay_merge_succ()
1306 root->right == rlist ? root : root->right, in vm_map_splay_merge_right()
1315 * The Sleator and Tarjan top-down splay algorithm with the
1316 * following variation. Max_free must be computed bottom-up, so
1327 * is the predecessor of the first map entry, and the successor of the
1331 * adjacent entry (lower if possible) if addr is not in the tree.
1343 header = &map->header; in vm_map_splay()
1354 llist = root->right; in vm_map_splay()
1363 rlist = root->left; in vm_map_splay()
1370 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_splay()
1371 map->root = root; in vm_map_splay()
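The max_free bookkeeping above can be stated as an invariant: every entry caches the largest free gap within its subtree, where the subtree's span is bounded by its nearest ancestors on each side. A hypothetical debugging sketch (not part of this file) that recomputes the value bottom-up using the same arithmetic as vm_map_entry_max_free_left()/_right():

static vm_size_t
example_check_max_free(vm_map_entry_t root, vm_map_entry_t ldar,
    vm_map_entry_t rdar)
{
        vm_size_t left, right;

        /* A child pointer equal to the adjacent ancestor means "no child". */
        left = root->left != ldar ?
            example_check_max_free(root->left, ldar, root) :
            root->start - ldar->end;
        right = root->right != rdar ?
            example_check_max_free(root->right, root, rdar) :
            rdar->start - root->end;
        KASSERT(root->max_free == vm_size_max(left, right),
            ("max_free %jx inconsistent at entry %p",
            (uintmax_t)root->max_free, root));
        return (root->max_free);
}

Starting the recursion as example_check_max_free(map->root, &map->header, &map->header), with the map locked and a non-NULL root, covers the whole tree.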
1379 * Insert/remove entries from maps. On linking, if new entry clips
1380 * existing entry, trim existing entry to avoid overlap, and manage
1381 * offsets. On unlinking, merge disappearing entry with neighbor, if
1386 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_link() argument
1392 "vm_map_entry_link: map %p, nentries %d, entry %p", map, in vm_map_entry_link()
1393 map->nentries, entry); in vm_map_entry_link()
1395 map->nentries++; in vm_map_entry_link()
1396 header = &map->header; in vm_map_entry_link()
1397 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_link()
1400 * The new entry does not overlap any existing entry in the in vm_map_entry_link()
1403 max_free_left = vm_map_splay_merge_pred(header, entry, llist); in vm_map_entry_link()
1404 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); in vm_map_entry_link()
1405 } else if (entry->start == root->start) { in vm_map_entry_link()
1407 * The new entry is a clone of root, with only the end field in vm_map_entry_link()
1408 * changed. The root entry will be shrunk to abut the new in vm_map_entry_link()
1409 * entry, and will be the right child of the new root entry in in vm_map_entry_link()
1412 KASSERT(entry->end < root->end, in vm_map_entry_link()
1413 ("%s: clip_start not within entry", __func__)); in vm_map_entry_link()
1415 if ((root->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_entry_link()
1416 root->offset += entry->end - root->start; in vm_map_entry_link()
1417 root->start = entry->end; in vm_map_entry_link()
1418 max_free_left = vm_map_splay_merge_pred(header, entry, llist); in vm_map_entry_link()
1419 max_free_right = root->max_free = vm_size_max( in vm_map_entry_link()
1420 vm_map_splay_merge_pred(entry, root, entry), in vm_map_entry_link()
1424 * The new entry is a clone of root, with only the start field in vm_map_entry_link()
1425 * changed. The root entry will be shrunk to abut the new in vm_map_entry_link()
1426 * entry, and will be the left child of the new root entry in in vm_map_entry_link()
1429 KASSERT(entry->end == root->end, in vm_map_entry_link()
1430 ("%s: clip_start not within entry", __func__)); in vm_map_entry_link()
1432 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_entry_link()
1433 entry->offset += entry->start - root->start; in vm_map_entry_link()
1434 root->end = entry->start; in vm_map_entry_link()
1435 max_free_left = root->max_free = vm_size_max( in vm_map_entry_link()
1437 vm_map_splay_merge_succ(entry, root, entry)); in vm_map_entry_link()
1438 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); in vm_map_entry_link()
1440 entry->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_link()
1441 map->root = entry; in vm_map_entry_link()
1451 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, in vm_map_entry_unlink() argument
1458 header = &map->header; in vm_map_entry_unlink()
1459 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_unlink()
1466 rlist->start = root->start; in vm_map_entry_unlink()
1467 MPASS((rlist->eflags & MAP_ENTRY_STACK_GAP) == 0); in vm_map_entry_unlink()
1468 rlist->offset = root->offset; in vm_map_entry_unlink()
1472 llist = root->right; in vm_map_entry_unlink()
1477 rlist = root->left; in vm_map_entry_unlink()
1481 header->left = header->right = header; in vm_map_entry_unlink()
1485 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_unlink()
1486 map->root = root; in vm_map_entry_unlink()
1488 map->nentries--; in vm_map_entry_unlink()
1489 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, in vm_map_entry_unlink()
1490 map->nentries, entry); in vm_map_entry_unlink()
1502 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount) in vm_map_entry_resize() argument
1507 header = &map->header; in vm_map_entry_resize()
1508 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_resize()
1511 entry->end += grow_amount; in vm_map_entry_resize()
1512 root->max_free = vm_size_max( in vm_map_entry_resize()
1515 map->root = root; in vm_map_entry_resize()
1517 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p", in vm_map_entry_resize()
1518 __func__, map, map->nentries, entry); in vm_map_entry_resize()
1524 * Finds the map entry containing (or
1525 * immediately preceding) the specified address
1526 * in the given map; the entry is returned
1527 * in the "entry" parameter. The boolean
1528 * result indicates whether the address is
1534 vm_offset_t address, in vm_map_lookup_entry() argument
1535 vm_map_entry_t *entry) /* OUT */ in vm_map_lookup_entry() argument
1541 * If the map is empty, then the map entry immediately preceding in vm_map_lookup_entry()
1542 * "address" is the map's header. in vm_map_lookup_entry()
1544 header = &map->header; in vm_map_lookup_entry()
1545 cur = map->root; in vm_map_lookup_entry()
1547 *entry = header; in vm_map_lookup_entry()
1550 if (address >= cur->start && cur->end > address) { in vm_map_lookup_entry()
1551 *entry = cur; in vm_map_lookup_entry()
1555 sx_try_upgrade(&map->lock)) { in vm_map_lookup_entry()
1562 cur = vm_map_splay(map, address); in vm_map_lookup_entry()
1565 sx_downgrade(&map->lock); in vm_map_lookup_entry()
1569 * If "address" is contained within a map entry, the new root in vm_map_lookup_entry()
1570 * is that map entry. Otherwise, the new root is a map entry in vm_map_lookup_entry()
1571 * immediately before or after "address". in vm_map_lookup_entry()
1573 if (address < cur->start) { in vm_map_lookup_entry()
1574 *entry = header; in vm_map_lookup_entry()
1577 *entry = cur; in vm_map_lookup_entry()
1578 return (address < cur->end); in vm_map_lookup_entry()
1582 * standard binary search tree lookup for "address". in vm_map_lookup_entry()
1586 if (address < cur->start) { in vm_map_lookup_entry()
1588 cur = cur->left; in vm_map_lookup_entry()
1591 } else if (cur->end <= address) { in vm_map_lookup_entry()
1593 cur = cur->right; in vm_map_lookup_entry()
1597 *entry = cur; in vm_map_lookup_entry()
1601 *entry = lbound; in vm_map_lookup_entry()
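A caller-side sketch of the boolean convention documented above: a TRUE result means the returned entry contains the address, while FALSE means the entry (possibly the header) immediately precedes it. Assumes the vm_map_lock_read()/vm_map_unlock_read() macros; the function name is illustrative only:

static bool
example_addr_is_mapped(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;
        bool mapped;

        vm_map_lock_read(map);
        /* TRUE: entry->start <= addr < entry->end. */
        mapped = vm_map_lookup_entry(map, addr, &entry);
        vm_map_unlock_read(map);
        return (mapped);
}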
1607 * returns the newly inserted map entry in '*res'. In case the new
1608 * entry is coalesced with a neighbor or an existing entry was
1609 * resized, that entry is returned. In any case, the returned entry
1610 * covers the specified address range.
1641 if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE | in vm_map_insert1()
1646 * Find the entry prior to the proposed starting address; if it's part in vm_map_insert1()
1647 * of an existing entry, this range is bogus. in vm_map_insert1()
1653 * Assert that the next entry doesn't overlap the end point. in vm_map_insert1()
1656 if (next_entry->start < end) in vm_map_insert1()
1692 bdry = pagesizes[bidx] - 1; in vm_map_insert1()
1703 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) in vm_map_insert1()
1707 object->cred == NULL, in vm_map_insert1()
1709 cred = curthread->td_ucred; in vm_map_insert1()
1729 if ((object->flags & OBJ_ANON) != 0) { in vm_map_insert1()
1731 if (object->ref_count > 1 || object->shadow_count != 0) in vm_map_insert1()
1735 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1738 prev_entry->end == start && (prev_entry->cred == cred || in vm_map_insert1()
1739 (prev_entry->object.vm_object != NULL && in vm_map_insert1()
1740 prev_entry->object.vm_object->cred == cred)) && in vm_map_insert1()
1741 vm_object_coalesce(prev_entry->object.vm_object, in vm_map_insert1()
1742 prev_entry->offset, in vm_map_insert1()
1743 (vm_size_t)(prev_entry->end - prev_entry->start), in vm_map_insert1()
1744 (vm_size_t)(end - prev_entry->end), cred != NULL && in vm_map_insert1()
1748 * can extend the previous map entry to include the in vm_map_insert1()
1751 if (prev_entry->inheritance == inheritance && in vm_map_insert1()
1752 prev_entry->protection == prot && in vm_map_insert1()
1753 prev_entry->max_protection == max && in vm_map_insert1()
1754 prev_entry->wired_count == 0) { in vm_map_insert1()
1755 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) == in vm_map_insert1()
1758 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1759 map->size += end - prev_entry->end; in vm_map_insert1()
1761 end - prev_entry->end); in vm_map_insert1()
1769 * map entry, we have to create a new map entry. We in vm_map_insert1()
1773 object = prev_entry->object.vm_object; in vm_map_insert1()
1774 offset = prev_entry->offset + in vm_map_insert1()
1775 (prev_entry->end - prev_entry->start); in vm_map_insert1()
1777 if (cred != NULL && object != NULL && object->cred != NULL && in vm_map_insert1()
1778 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_insert1()
1787 * Create a new entry in vm_map_insert1()
1790 new_entry->start = start; in vm_map_insert1()
1791 new_entry->end = end; in vm_map_insert1()
1792 new_entry->cred = NULL; in vm_map_insert1()
1794 new_entry->eflags = protoeflags; in vm_map_insert1()
1795 new_entry->object.vm_object = object; in vm_map_insert1()
1796 new_entry->offset = offset; in vm_map_insert1()
1798 new_entry->inheritance = inheritance; in vm_map_insert1()
1799 new_entry->protection = prot; in vm_map_insert1()
1800 new_entry->max_protection = max; in vm_map_insert1()
1801 new_entry->wired_count = 0; in vm_map_insert1()
1802 new_entry->wiring_thread = NULL; in vm_map_insert1()
1803 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; in vm_map_insert1()
1804 new_entry->next_read = start; in vm_map_insert1()
1808 new_entry->cred = cred; in vm_map_insert1()
1811 * Insert the new entry into the list in vm_map_insert1()
1814 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_insert1()
1815 map->size += new_entry->end - new_entry->start; in vm_map_insert1()
1818 * Try to coalesce the new entry with both the previous and next in vm_map_insert1()
1820 * with the previous entry when object is NULL. Here, we handle the in vm_map_insert1()
1828 end - start, cow & MAP_PREFAULT_PARTIAL); in vm_map_insert1()
1838 * specified address range.
1842 * If object is non-NULL, ref count must be bumped by caller
1843 * prior to making call to account for the new entry.
1858 * Find the first fit (lowest VM address) for "length" free bytes
1859 * beginning at address >= start in the given map.
1862 * contiguous free space between an entry in its subtree and a
1863 * neighbor of that entry. This allows finding a free region in
1869 * Returns: starting address if sufficient space,
1870 * vm_map_max(map)-length+1 if insufficient space.
1882 * Request must fit within min/max VM address and must avoid in vm_map_findspace()
1883 * address wrap. in vm_map_findspace()
1886 if (start >= vm_map_max(map) || length > vm_map_max(map) - start) in vm_map_findspace()
1887 return (vm_map_max(map) - length + 1); in vm_map_findspace()
1889 /* Empty tree means wide open address space. */ in vm_map_findspace()
1890 if (map->root == NULL) in vm_map_findspace()
1894 * After splay_split, if start is within an entry, push it to the start in vm_map_findspace()
1897 * enough; otherwise set gap_end to start, skip gap-checking, and move in vm_map_findspace()
1900 header = &map->header; in vm_map_findspace()
1902 gap_end = rlist->start; in vm_map_findspace()
1904 start = root->end; in vm_map_findspace()
1905 if (root->right != rlist) in vm_map_findspace()
1911 rlist = root->left; in vm_map_findspace()
1916 llist = root->right; in vm_map_findspace()
1920 root->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_findspace()
1921 map->root = root; in vm_map_findspace()
1923 if (length <= gap_end - start) in vm_map_findspace()
1927 if (root->right == header || length > root->right->max_free) in vm_map_findspace()
1928 return (vm_map_max(map) - length + 1); in vm_map_findspace()
1931 * Splay for the least large-enough gap in the right subtree. in vm_map_findspace()
1946 llist = root->right; in vm_map_findspace()
1949 root->max_free = vm_size_max(max_free_left, in vm_map_findspace()
1953 rlist = y->left; in vm_map_findspace()
1954 y->max_free = vm_size_max( in vm_map_findspace()
1957 root->max_free = vm_size_max(max_free_left, y->max_free); in vm_map_findspace()
1959 map->root = root; in vm_map_findspace()
1961 return (root->end); in vm_map_findspace()
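Given the return convention documented above, vm_map_max(map) - length + 1 on failure, a caller detects an unsatisfiable request by checking whether the result plus the length still fits below vm_map_max(). A sketch assuming the map is already exclusively locked; the function name is illustrative only:

static int
example_find_space(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addrp)
{
        vm_offset_t addr;

        addr = vm_map_findspace(map, start, length);
        if (addr + length > vm_map_max(map))
                return (KERN_NO_SPACE);         /* insufficient space */
        *addrp = addr;
        return (KERN_SUCCESS);
}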
1974 ("vm_map_fixed: non-NULL backing object for stack")); in vm_map_fixed()
2031 * specified alignment. Performs an address-ordered, first-fit search from
2032 * the given address "*addr", with an optional upper bound "max_addr". If the
2035 * superpage mappings. Returns KERN_SUCCESS and the address of the free space
2039 * of free space at the given address.
2051 ("caller failed to provide space %#jx at address %p", in vm_map_alignspace()
2055 * At the start of every iteration, the free space at address in vm_map_alignspace()
2072 * Test for address wrap on "*addr". A wrapped "*addr" could in vm_map_alignspace()
2073 * be a valid address, in which case vm_map_findspace() cannot in vm_map_alignspace()
2108 * vm_map_find finds an unallocated region in the target address
2110 * first-fit from the specified address; the region found is
2113 * If object is non-NULL, ref count must be bumped by caller
2114 * prior to making call to account for the new entry.
2142 ("non-NULL backing object for stack")); in vm_map_find_locked()
2146 (object->flags & OBJ_COLORED) == 0)) in vm_map_find_locked()
2153 en_aslr = (map->flags & MAP_ASLR) != 0; in vm_map_find_locked()
2155 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 && in vm_map_find_locked()
2162 (map->flags & MAP_ASLR_IGNSTART) != 0) in vm_map_find_locked()
2166 curr_min_addr = map->anon_loc; in vm_map_find_locked()
2180 * We make up to two attempts to find address space in vm_map_find_locked()
2184 * perform a first-fit search of the available address in vm_map_find_locked()
2201 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ? in vm_map_find_locked()
2225 pidx--; in vm_map_find_locked()
2236 /* And randomize the start address. */ in vm_map_find_locked()
2281 * Update the starting address for clustered anonymous memory mappings in vm_map_find_locked()
2282 * if a starting address was not previously defined or an ASLR restart in vm_map_find_locked()
2283 * placed an anonymous memory mapping at a lower address. in vm_map_find_locked()
2285 if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 || in vm_map_find_locked()
2286 *addr < map->anon_loc)) in vm_map_find_locked()
2287 map->anon_loc = *addr; in vm_map_find_locked()
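The comment above vm_map_find() notes that a non-NULL object must have its reference count bumped by the caller to account for the new entry. A hypothetical wrapper illustrating that contract, assuming the current vm_map_find() prototype (map, object, offset, addr, length, max_addr, find_space, prot, max, cow) and the VMFS_ANY_SPACE placement policy:

static int
example_map_object(vm_map_t map, vm_object_t object, vm_size_t size,
    vm_offset_t *addrp)
{
        int rv;

        /* The new map entry consumes one object reference on success. */
        vm_object_reference(object);
        /* *addrp supplies the placement hint/minimum on entry. */
        rv = vm_map_find(map, object, 0, addrp, size, 0, VMFS_ANY_SPACE,
            VM_PROT_RW, VM_PROT_RW, 0);
        if (rv != KERN_SUCCESS)
                vm_object_deallocate(object);   /* no entry was created */
        return (rv);
}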
2293 * additional parameter ("default_addr") and treats the given address
2295 * and not as the minimum address where the mapping is created.
2300 * the hint with "default_addr" as the minimum address for the
2327 * A map entry with any of the following flags set must not be merged with
2328 * another entry.
2335 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) in vm_map_mergeable_neighbors() argument
2338 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || in vm_map_mergeable_neighbors()
2339 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, in vm_map_mergeable_neighbors()
2341 prev, entry)); in vm_map_mergeable_neighbors()
2342 return (prev->end == entry->start && in vm_map_mergeable_neighbors()
2343 prev->object.vm_object == entry->object.vm_object && in vm_map_mergeable_neighbors()
2344 (prev->object.vm_object == NULL || in vm_map_mergeable_neighbors()
2345 prev->offset + (prev->end - prev->start) == entry->offset) && in vm_map_mergeable_neighbors()
2346 prev->eflags == entry->eflags && in vm_map_mergeable_neighbors()
2347 prev->protection == entry->protection && in vm_map_mergeable_neighbors()
2348 prev->max_protection == entry->max_protection && in vm_map_mergeable_neighbors()
2349 prev->inheritance == entry->inheritance && in vm_map_mergeable_neighbors()
2350 prev->wired_count == entry->wired_count && in vm_map_mergeable_neighbors()
2351 prev->cred == entry->cred); in vm_map_mergeable_neighbors()
2355 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) in vm_map_merged_neighbor_dispose() argument
2362 * kept without causing a lock-order reversal with the vnode lock. in vm_map_merged_neighbor_dispose()
2365 * object->un_pager.vnp.writemappings, the writemappings value in vm_map_merged_neighbor_dispose()
2366 * should not be adjusted when the entry is disposed of. in vm_map_merged_neighbor_dispose()
2368 if (entry->object.vm_object != NULL) in vm_map_merged_neighbor_dispose()
2369 vm_object_deallocate(entry->object.vm_object); in vm_map_merged_neighbor_dispose()
2370 if (entry->cred != NULL) in vm_map_merged_neighbor_dispose()
2371 crfree(entry->cred); in vm_map_merged_neighbor_dispose()
2372 vm_map_entry_dispose(map, entry); in vm_map_merged_neighbor_dispose()
2381 * the map entry that includes the first range.
2387 vm_map_entry_t entry) in vm_map_try_merge_entries() argument
2391 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && in vm_map_try_merge_entries()
2392 vm_map_mergeable_neighbors(prev_entry, entry)) { in vm_map_try_merge_entries()
2395 return (entry); in vm_map_try_merge_entries()
2403 * Allocate an object to back a map entry.
2406 vm_map_entry_back(vm_map_entry_t entry) in vm_map_entry_back() argument
2410 KASSERT(entry->object.vm_object == NULL, in vm_map_entry_back()
2411 ("map entry %p has backing object", entry)); in vm_map_entry_back()
2412 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_back()
2413 ("map entry %p is a submap", entry)); in vm_map_entry_back()
2414 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, in vm_map_entry_back()
2415 entry->cred, entry->end - entry->start); in vm_map_entry_back()
2416 entry->object.vm_object = object; in vm_map_entry_back()
2417 entry->offset = 0; in vm_map_entry_back()
2418 entry->cred = NULL; in vm_map_entry_back()
2424 * If there is no object backing this entry, create one. Otherwise, if
2425 * the entry has cred, give it to the backing object.
2428 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_charge_object() argument
2432 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_charge_object()
2433 ("map entry %p is a submap", entry)); in vm_map_entry_charge_object()
2434 if (entry->object.vm_object == NULL && !vm_map_is_system(map) && in vm_map_entry_charge_object()
2435 (entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_entry_charge_object()
2436 vm_map_entry_back(entry); in vm_map_entry_charge_object()
2437 else if (entry->object.vm_object != NULL && in vm_map_entry_charge_object()
2438 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && in vm_map_entry_charge_object()
2439 entry->cred != NULL) { in vm_map_entry_charge_object()
2440 VM_OBJECT_WLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2441 KASSERT(entry->object.vm_object->cred == NULL, in vm_map_entry_charge_object()
2442 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); in vm_map_entry_charge_object()
2443 entry->object.vm_object->cred = entry->cred; in vm_map_entry_charge_object()
2444 entry->object.vm_object->charge = entry->end - entry->start; in vm_map_entry_charge_object()
2445 VM_OBJECT_WUNLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2446 entry->cred = NULL; in vm_map_entry_charge_object()
2453 * Create a duplicate map entry for clipping.
2456 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_clone() argument
2464 * objects won't be created after the map entry is split. in vm_map_entry_clone()
2466 vm_map_entry_charge_object(map, entry); in vm_map_entry_clone()
2468 /* Clone the entry. */ in vm_map_entry_clone()
2470 *new_entry = *entry; in vm_map_entry_clone()
2471 if (new_entry->cred != NULL) in vm_map_entry_clone()
2472 crhold(entry->cred); in vm_map_entry_clone()
2473 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { in vm_map_entry_clone()
2474 vm_object_reference(new_entry->object.vm_object); in vm_map_entry_clone()
2477 * The object->un_pager.vnp.writemappings for the object of in vm_map_entry_clone()
2478 * MAP_ENTRY_WRITECNT type entry shall be kept as is here. The in vm_map_entry_clone()
2479 * virtual pages are re-distributed among the clipped entries, in vm_map_entry_clone()
2489 * Asserts that the given entry begins at or after
2490 * the specified address; if necessary,
2491 * it splits the entry into two.
2494 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr) in vm_map_clip_start() argument
2501 "%s: map %p entry %p start 0x%jx", __func__, map, entry, in vm_map_clip_start()
2504 if (startaddr <= entry->start) in vm_map_clip_start()
2508 KASSERT(entry->end > startaddr && entry->start < startaddr, in vm_map_clip_start()
2509 ("%s: invalid clip of entry %p", __func__, entry)); in vm_map_clip_start()
2511 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); in vm_map_clip_start()
2513 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_start()
2517 new_entry = vm_map_entry_clone(map, entry); in vm_map_clip_start()
2520 * Split off the front portion. Insert the new entry BEFORE this one, in vm_map_clip_start()
2521 * so that this entry has the specified starting address. in vm_map_clip_start()
2523 new_entry->end = startaddr; in vm_map_clip_start()
2531 * Find the entry at or just after 'start', and clip it if 'start' is in
2532 * the interior of the entry. Return entry after 'start', and in
2533 * prev_entry set the entry before 'start'.
2539 vm_map_entry_t entry; in vm_map_lookup_clip_start() local
2548 entry = *prev_entry; in vm_map_lookup_clip_start()
2549 rv = vm_map_clip_start(map, entry, start); in vm_map_lookup_clip_start()
2552 *prev_entry = vm_map_entry_pred(entry); in vm_map_lookup_clip_start()
2554 entry = vm_map_entry_succ(*prev_entry); in vm_map_lookup_clip_start()
2555 *res_entry = entry; in vm_map_lookup_clip_start()
2562 * Asserts that the given entry ends at or before
2563 * the specified address; if necessary,
2564 * it splits the entry into two.
2567 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr) in vm_map_clip_end() argument
2574 "%s: map %p entry %p end 0x%jx", __func__, map, entry, in vm_map_clip_end()
2577 if (endaddr >= entry->end) in vm_map_clip_end()
2581 KASSERT(entry->start < endaddr && entry->end > endaddr, in vm_map_clip_end()
2582 ("%s: invalid clip of entry %p", __func__, entry)); in vm_map_clip_end()
2584 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); in vm_map_clip_end()
2586 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0) in vm_map_clip_end()
2590 new_entry = vm_map_entry_clone(map, entry); in vm_map_clip_end()
2593 * Split off the back portion. Insert the new entry AFTER this one, in vm_map_clip_end()
2594 * so that this entry has the specified ending address. in vm_map_clip_end()
2596 new_entry->start = endaddr; in vm_map_clip_end()
2627 vm_map_entry_t entry; in vm_map_submap() local
2633 submap->flags |= MAP_IS_SUB_MAP; in vm_map_submap()
2638 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && in vm_map_submap()
2639 (entry->eflags & MAP_ENTRY_COW) == 0 && in vm_map_submap()
2640 entry->object.vm_object == NULL) { in vm_map_submap()
2641 result = vm_map_clip_start(map, entry, start); in vm_map_submap()
2644 result = vm_map_clip_end(map, entry, end); in vm_map_submap()
2647 entry->object.sub_map = submap; in vm_map_submap()
2648 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; in vm_map_submap()
2656 submap->flags &= ~MAP_IS_SUB_MAP; in vm_map_submap()
2671 * object's memory-resident pages. No further physical pages are
2674 * limited number of page mappings are created at the low-end of the
2675 * specified address range. (For this purpose, a superpage mapping
2677 * the specified address range are mapped.
2691 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2693 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { in vm_map_pmap_enter()
2694 pmap_object_init_pt(map->pmap, addr, object, pindex, in vm_map_pmap_enter()
2704 if (psize + pindex > object->size) { in vm_map_pmap_enter()
2705 if (pindex >= object->size) { in vm_map_pmap_enter()
2709 psize = object->size - pindex; in vm_map_pmap_enter()
2723 tmpidx = p->pindex - pindex; in vm_map_pmap_enter()
2738 for (psind = p->psind; psind > 0; psind--) { in vm_map_pmap_enter()
2740 (pagesizes[psind] - 1)) == 0) { in vm_map_pmap_enter()
2741 mask = atop(pagesizes[psind]) - 1; in vm_map_pmap_enter()
2752 pmap_enter_object(map->pmap, start, addr + in vm_map_pmap_enter()
2758 pmap_enter_object(map->pmap, start, addr + ptoa(psize), in vm_map_pmap_enter()
2764 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot, in vm_map_protect_guard() argument
2769 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0); in vm_map_protect_guard()
2770 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0) in vm_map_protect_guard()
2773 old_prot = PROT_EXTRACT(entry->offset); in vm_map_protect_guard()
2775 entry->offset = PROT_MAX(new_maxprot) | in vm_map_protect_guard()
2779 entry->offset = new_prot | PROT_MAX( in vm_map_protect_guard()
2780 PROT_MAX_EXTRACT(entry->offset)); in vm_map_protect_guard()
2788 * specified address region in the target map.
2794 vm_map_entry_t entry, first_entry, in_tran, prev_entry; in vm_map_protect() local
2815 if ((map->flags & MAP_WXORX) != 0 && in vm_map_protect()
2826 * update the protection on the map entry in between faults. in vm_map_protect()
2836 (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { in vm_map_protect()
2844 while (!CONTAINS_BITS(first_entry->eflags, in vm_map_protect()
2848 start = first_entry->start; in vm_map_protect()
2859 for (entry = first_entry; entry->start < end; in vm_map_protect()
2860 entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2861 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_protect()
2865 if ((entry->eflags & (MAP_ENTRY_GUARD | in vm_map_protect()
2868 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ? in vm_map_protect()
2869 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection; in vm_map_protect()
2874 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) in vm_map_protect()
2875 in_tran = entry; in vm_map_protect()
2879 * Postpone the operation until all in-transition map entries have in vm_map_protect()
2880 * stabilized. An in-transition entry might already have its pages in vm_map_protect()
2886 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_protect()
2893 * private (i.e., copy-on-write) mappings that are transitioning from in vm_map_protect()
2894 * read-only to read/write access. If a reservation fails, break out in vm_map_protect()
2903 for (entry = first_entry; entry->start < end; in vm_map_protect()
2904 entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2905 rv = vm_map_clip_end(map, entry, end); in vm_map_protect()
2912 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || in vm_map_protect()
2913 ENTRY_CHARGED(entry) || in vm_map_protect()
2914 (entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2917 cred = curthread->td_ucred; in vm_map_protect()
2918 obj = entry->object.vm_object; in vm_map_protect()
2921 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { in vm_map_protect()
2922 if (!swap_reserve(entry->end - entry->start)) { in vm_map_protect()
2924 end = entry->end; in vm_map_protect()
2928 entry->cred = cred; in vm_map_protect()
2933 if ((obj->flags & OBJ_SWAP) == 0) { in vm_map_protect()
2940 * we cannot distinguish between non-charged and in vm_map_protect()
2943 KASSERT(obj->charge == 0, in vm_map_protect()
2944 ("vm_map_protect: object %p overcharged (entry %p)", in vm_map_protect()
2945 obj, entry)); in vm_map_protect()
2946 if (!swap_reserve(ptoa(obj->size))) { in vm_map_protect()
2949 end = entry->end; in vm_map_protect()
2954 obj->cred = cred; in vm_map_protect()
2955 obj->charge = ptoa(obj->size); in vm_map_protect()
2964 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry; in vm_map_protect()
2965 entry->start < end; in vm_map_protect()
2966 vm_map_try_merge_entries(map, prev_entry, entry), in vm_map_protect()
2967 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2971 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_protect()
2972 vm_map_protect_guard(entry, new_prot, new_maxprot, in vm_map_protect()
2977 old_prot = entry->protection; in vm_map_protect()
2980 entry->max_protection = new_maxprot; in vm_map_protect()
2981 entry->protection = new_maxprot & old_prot; in vm_map_protect()
2984 entry->protection = new_prot; in vm_map_protect()
2990 * copy-on-write and enable write access in the physical map. in vm_map_protect()
2992 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && in vm_map_protect()
2993 (entry->protection & VM_PROT_WRITE) != 0 && in vm_map_protect()
2995 vm_fault_copy_entry(map, map, entry, entry, NULL); in vm_map_protect()
2999 * about copy-on-write here. in vm_map_protect()
3001 if ((old_prot & ~entry->protection) != 0) { in vm_map_protect()
3002 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ in vm_map_protect() argument
3004 pmap_protect(map->pmap, entry->start, in vm_map_protect()
3005 entry->end, in vm_map_protect()
3006 entry->protection & MASK(entry)); in vm_map_protect()
3010 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_protect()
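A hedged caller sketch for the routine above, assuming the six-argument vm_map_protect() prototype and the VM_MAP_PROTECT_SET_PROT flag, which applies new_prot while leaving new_maxprot untouched (as mprotect(2) does); the function name is illustrative only:

static int
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        /* Change only the active protection; max_protection is unchanged. */
        return (vm_map_protect(map, start, end, VM_PROT_READ, 0,
            VM_MAP_PROTECT_SET_PROT));
}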
3030 vm_map_entry_t entry, prev_entry; in vm_map_madvise() local
3037 * various clipping operations. Otherwise we only need a read-lock in vm_map_madvise()
3066 * Locate starting entry and clip if necessary. in vm_map_madvise()
3075 * limited to the specified address range. in vm_map_madvise()
3077 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry); in vm_map_madvise()
3083 for (; entry->start < end; prev_entry = entry, in vm_map_madvise()
3084 entry = vm_map_entry_succ(entry)) { in vm_map_madvise()
3085 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_map_madvise()
3088 rv = vm_map_clip_end(map, entry, end); in vm_map_madvise()
3096 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3100 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3104 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3108 entry->eflags |= MAP_ENTRY_NOSYNC; in vm_map_madvise()
3111 entry->eflags &= ~MAP_ENTRY_NOSYNC; in vm_map_madvise()
3114 entry->eflags |= MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3117 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3122 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_madvise()
3124 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_madvise()
3136 if (!vm_map_lookup_entry(map, start, &entry)) in vm_map_madvise()
3137 entry = vm_map_entry_succ(entry); in vm_map_madvise()
3138 for (; entry->start < end; in vm_map_madvise()
3139 entry = vm_map_entry_succ(entry)) { in vm_map_madvise()
3142 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP | in vm_map_madvise()
3149 * we hold the VM map read-locked, neither the in vm_map_madvise()
3150 * entry's object nor the presence of a in vm_map_madvise()
3154 entry->object.vm_object != NULL && in vm_map_madvise()
3155 entry->object.vm_object->backing_object != NULL) in vm_map_madvise()
3158 pstart = OFF_TO_IDX(entry->offset); in vm_map_madvise()
3159 pend = pstart + atop(entry->end - entry->start); in vm_map_madvise()
3160 useStart = entry->start; in vm_map_madvise()
3161 useEnd = entry->end; in vm_map_madvise()
3163 if (entry->start < start) { in vm_map_madvise()
3164 pstart += atop(start - entry->start); in vm_map_madvise()
3167 if (entry->end > end) { in vm_map_madvise()
3168 pend -= atop(entry->end - end); in vm_map_madvise()
3186 pmap_advise(map->pmap, useStart, useEnd, in vm_map_madvise()
3189 vm_object_madvise(entry->object.vm_object, pstart, in vm_map_madvise()
3193 * Pre-populate paging structures in the in vm_map_madvise()
3198 entry->wired_count == 0) { in vm_map_madvise()
3201 entry->protection, in vm_map_madvise()
3202 entry->object.vm_object, in vm_map_madvise()
3204 ptoa(pend - pstart), in vm_map_madvise()
3217 * Sets the inheritance of the specified address
3226 vm_map_entry_t entry, lentry, prev_entry, start_entry; in vm_map_inherit() local
3245 if (vm_map_lookup_entry(map, end - 1, &lentry)) { in vm_map_inherit()
3251 for (entry = start_entry; entry->start < end; in vm_map_inherit()
3252 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_inherit()
3253 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) in vm_map_inherit()
3260 for (entry = start_entry; entry->start < end; prev_entry = entry, in vm_map_inherit()
3261 entry = vm_map_entry_succ(entry)) { in vm_map_inherit()
3262 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", in vm_map_inherit()
3263 entry, (uintmax_t)entry->end, (uintmax_t)end)); in vm_map_inherit()
3264 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || in vm_map_inherit()
3266 entry->inheritance = new_inheritance; in vm_map_inherit()
3267 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_inherit()
3269 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_inherit()
3278 * Release the map lock, and sleep until the entry is no longer in
3280 * another held the lock, lookup a possibly-changed entry at or after the
3281 * 'start' position of the old entry.
3287 vm_map_entry_t entry; in vm_map_entry_in_transition() local
3292 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_entry_in_transition()
3293 ("not in-tranition map entry %p", in_entry)); in vm_map_entry_in_transition()
3295 * We have not yet clipped the entry. in vm_map_entry_in_transition()
3297 start = MAX(in_start, in_entry->start); in vm_map_entry_in_transition()
3298 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_entry_in_transition()
3299 last_timestamp = map->timestamp; in vm_map_entry_in_transition()
3306 if (last_timestamp + 1 == map->timestamp) in vm_map_entry_in_transition()
3310 * Look again for the entry because the map was modified while it was in vm_map_entry_in_transition()
3311 * unlocked. Specifically, the entry may have been clipped, merged, or in vm_map_entry_in_transition()
3314 if (!vm_map_lookup_entry(map, start, &entry)) { in vm_map_entry_in_transition()
3319 entry = vm_map_entry_succ(entry); in vm_map_entry_in_transition()
3321 return (entry); in vm_map_entry_in_transition()
3333 vm_map_entry_t entry, first_entry, next_entry, prev_entry; in vm_map_unwire() local
3352 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_unwire()
3353 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_unwire()
3355 * We have not yet clipped the entry. in vm_map_unwire()
3358 &end, holes_ok, entry); in vm_map_unwire()
3360 if (entry == first_entry) { in vm_map_unwire()
3367 first_entry = (entry == first_entry) ? in vm_map_unwire()
3371 rv = vm_map_clip_start(map, entry, start); in vm_map_unwire()
3374 rv = vm_map_clip_end(map, entry, end); in vm_map_unwire()
3379 * Mark the entry in case the map lock is released. (See in vm_map_unwire()
3382 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_unwire()
3383 entry->wiring_thread == NULL, in vm_map_unwire()
3384 ("owned map entry %p", entry)); in vm_map_unwire()
3385 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3386 entry->wiring_thread = curthread; in vm_map_unwire()
3387 next_entry = vm_map_entry_succ(entry); in vm_map_unwire()
3393 entry->end < end && next_entry->start > entry->end) { in vm_map_unwire()
3394 end = entry->end; in vm_map_unwire()
3399 * If system unwiring, require that the entry is system wired. in vm_map_unwire()
3402 vm_map_entry_system_wired_count(entry) == 0) { in vm_map_unwire()
3403 end = entry->end; in vm_map_unwire()
3413 entry = vm_map_entry_succ(first_entry); in vm_map_unwire()
3416 entry = first_entry; in vm_map_unwire()
3418 for (; entry->start < end; in vm_map_unwire()
3419 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_unwire()
3426 * entry. Detect these cases and skip any entries in vm_map_unwire()
3429 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_unwire()
3430 entry->wiring_thread != curthread) { in vm_map_unwire()
3432 ("vm_map_unwire: !HOLESOK and new/changed entry")); in vm_map_unwire()
3437 (entry->eflags & MAP_ENTRY_USER_WIRED))) { in vm_map_unwire()
3438 if (entry->wired_count == 1) in vm_map_unwire()
3439 vm_map_entry_unwire(map, entry); in vm_map_unwire()
3441 entry->wired_count--; in vm_map_unwire()
3443 entry->eflags &= ~MAP_ENTRY_USER_WIRED; in vm_map_unwire()
3445 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_unwire()
3446 ("vm_map_unwire: in-transition flag missing %p", entry)); in vm_map_unwire()
3447 KASSERT(entry->wiring_thread == curthread, in vm_map_unwire()
3448 ("vm_map_unwire: alien wire %p", entry)); in vm_map_unwire()
3449 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3450 entry->wiring_thread = NULL; in vm_map_unwire()
3451 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_unwire()
3452 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_unwire()
3455 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_unwire()
3457 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_unwire()
3489 * Handle a wiring failure on the given entry.
3494 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, in vm_map_wire_entry_failure() argument
3499 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && in vm_map_wire_entry_failure()
3500 entry->wired_count == 1, in vm_map_wire_entry_failure()
3501 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); in vm_map_wire_entry_failure()
3502 KASSERT(failed_addr < entry->end, in vm_map_wire_entry_failure()
3503 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); in vm_map_wire_entry_failure()
3506 * If any pages at the start of this entry were successfully wired, in vm_map_wire_entry_failure()
3509 if (failed_addr > entry->start) { in vm_map_wire_entry_failure()
3510 pmap_unwire(map->pmap, entry->start, failed_addr); in vm_map_wire_entry_failure()
3511 vm_object_unwire(entry->object.vm_object, entry->offset, in vm_map_wire_entry_failure()
3512 failed_addr - entry->start, PQ_ACTIVE); in vm_map_wire_entry_failure()
3516 * Assign an out-of-range value to represent the failure to wire this in vm_map_wire_entry_failure()
3517 * entry. in vm_map_wire_entry_failure()
3519 entry->wired_count = -1; in vm_map_wire_entry_failure()
3542 vm_map_entry_t entry, first_entry, next_entry, prev_entry; in vm_map_wire_locked() local
3566 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_wire_locked()
3567 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_wire_locked()
3569 * We have not yet clipped the entry. in vm_map_wire_locked()
3572 &end, holes_ok, entry); in vm_map_wire_locked()
3574 if (entry == first_entry) in vm_map_wire_locked()
3579 first_entry = (entry == first_entry) ? in vm_map_wire_locked()
3583 rv = vm_map_clip_start(map, entry, start); in vm_map_wire_locked()
3586 rv = vm_map_clip_end(map, entry, end); in vm_map_wire_locked()
3591 * Mark the entry in case the map lock is released. (See in vm_map_wire_locked()
3594 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_wire_locked()
3595 entry->wiring_thread == NULL, in vm_map_wire_locked()
3596 ("owned map entry %p", entry)); in vm_map_wire_locked()
3597 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_wire_locked()
3598 entry->wiring_thread = curthread; in vm_map_wire_locked()
3599 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 in vm_map_wire_locked()
3600 || (entry->protection & prot) != prot) { in vm_map_wire_locked()
3601 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; in vm_map_wire_locked()
3603 end = entry->end; in vm_map_wire_locked()
3607 } else if (entry->wired_count == 0) { in vm_map_wire_locked()
3608 entry->wired_count++; in vm_map_wire_locked()
3610 npages = atop(entry->end - entry->start); in vm_map_wire_locked()
3612 vm_map_wire_entry_failure(map, entry, in vm_map_wire_locked()
3613 entry->start); in vm_map_wire_locked()
3614 end = entry->end; in vm_map_wire_locked()
3620 * Release the map lock, relying on the in-transition in vm_map_wire_locked()
3623 saved_start = entry->start; in vm_map_wire_locked()
3624 saved_end = entry->end; in vm_map_wire_locked()
3625 last_timestamp = map->timestamp; in vm_map_wire_locked()
3626 bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); in vm_map_wire_locked()
3644 if (last_timestamp + 1 != map->timestamp) { in vm_map_wire_locked()
3646 * Look again for the entry because the map was in vm_map_wire_locked()
3647 * modified while it was unlocked. The entry in vm_map_wire_locked()
3655 first_entry = (entry == first_entry) ? in vm_map_wire_locked()
3657 for (entry = next_entry; entry->end < saved_end; in vm_map_wire_locked()
3658 entry = vm_map_entry_succ(entry)) { in vm_map_wire_locked()
3666 faddr < entry->end) in vm_map_wire_locked()
3668 entry, faddr); in vm_map_wire_locked()
3672 vm_map_wire_entry_failure(map, entry, faddr); in vm_map_wire_locked()
3675 end = entry->end; in vm_map_wire_locked()
3679 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3680 entry->wired_count++; in vm_map_wire_locked()
3686 next_entry = vm_map_entry_succ(entry); in vm_map_wire_locked()
3688 entry->end < end && next_entry->start > entry->end) { in vm_map_wire_locked()
3689 end = entry->end; in vm_map_wire_locked()
3701 entry = vm_map_entry_succ(first_entry); in vm_map_wire_locked()
3704 entry = first_entry; in vm_map_wire_locked()
3706 for (; entry->start < end; in vm_map_wire_locked()
3707 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_wire_locked()
3714 * wiring this new mapping entry. Detect these cases in vm_map_wire_locked()
3717 * Another way to get an entry not marked with in vm_map_wire_locked()
3721 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_wire_locked()
3722 entry->wiring_thread != curthread) { in vm_map_wire_locked()
3724 ("vm_map_wire: !HOLESOK and new/changed entry")); in vm_map_wire_locked()
3728 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { in vm_map_wire_locked()
3732 entry->eflags |= MAP_ENTRY_USER_WIRED; in vm_map_wire_locked()
3733 } else if (entry->wired_count == -1) { in vm_map_wire_locked()
3735 * Wiring failed on this entry. Thus, unwiring is in vm_map_wire_locked()
3738 entry->wired_count = 0; in vm_map_wire_locked()
3740 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3742 * Undo the wiring. Wiring succeeded on this entry in vm_map_wire_locked()
3743 * but failed on a later entry. in vm_map_wire_locked()
3745 if (entry->wired_count == 1) { in vm_map_wire_locked()
3746 vm_map_entry_unwire(map, entry); in vm_map_wire_locked()
3749 atop(entry->end - entry->start)); in vm_map_wire_locked()
3751 entry->wired_count--; in vm_map_wire_locked()
3753 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_wire_locked()
3754 ("vm_map_wire: in-transition flag missing %p", entry)); in vm_map_wire_locked()
3755 KASSERT(entry->wiring_thread == curthread, in vm_map_wire_locked()
3756 ("vm_map_wire: alien wire %p", entry)); in vm_map_wire_locked()
3757 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | in vm_map_wire_locked()
3759 entry->wiring_thread = NULL; in vm_map_wire_locked()
3760 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_wire_locked()
3761 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_wire_locked()
3764 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_wire_locked()
3766 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_wire_locked()
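/*
 * Editor's sketch: how a caller in the mlock(2) path drives the code above.
 * vm_map_wire() takes the map lock and calls vm_map_wire_locked(); the
 * wrapper name and argument handling here are illustrative only.
 */
static int
example_mlock_range(struct proc *p, vm_offset_t addr, vm_size_t len)
{
	vm_offset_t start, end;

	start = trunc_page(addr);
	end = round_page(addr + len);
	return (vm_map_wire(&p->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}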
3775 * Push any dirty cached pages in the address range to their pager.
3796 vm_map_entry_t entry, first_entry, next_entry; in vm_map_sync() local
3810 start = first_entry->start; in vm_map_sync()
3811 end = first_entry->end; in vm_map_sync()
3815 * Make a first pass to check for user-wired memory, holes, in vm_map_sync()
3818 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_sync()
3820 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { in vm_map_sync()
3824 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry); in vm_map_sync()
3826 ((start & (pagesizes[bdry_idx] - 1)) != 0 || in vm_map_sync()
3827 (end & (pagesizes[bdry_idx] - 1)) != 0)) { in vm_map_sync()
3832 next_entry = vm_map_entry_succ(entry); in vm_map_sync()
3833 if (end > entry->end && in vm_map_sync()
3834 entry->end != next_entry->start) { in vm_map_sync()
3841 pmap_remove(map->pmap, start, end); in vm_map_sync()
3848 for (entry = first_entry; entry->start < end;) { in vm_map_sync()
3849 offset = entry->offset + (start - entry->start); in vm_map_sync()
3850 size = (end <= entry->end ? end : entry->end) - start; in vm_map_sync()
3851 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_sync()
3856 smap = entry->object.sub_map; in vm_map_sync()
3859 tsize = tentry->end - offset; in vm_map_sync()
3862 object = tentry->object.vm_object; in vm_map_sync()
3863 offset = tentry->offset + (offset - tentry->start); in vm_map_sync()
3866 object = entry->object.vm_object; in vm_map_sync()
3869 last_timestamp = map->timestamp; in vm_map_sync()
3876 if (last_timestamp == map->timestamp || in vm_map_sync()
3877 !vm_map_lookup_entry(map, start, &entry)) in vm_map_sync()
3878 entry = vm_map_entry_succ(entry); in vm_map_sync()
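/*
 * Editor's sketch: msync(2)-style use of vm_map_sync().  The syncio
 * argument distinguishes MS_SYNC from MS_ASYNC and invalidate corresponds
 * to MS_INVALIDATE; the wrapper name is illustrative only.
 */
static int
example_msync_range(vm_map_t map, vm_offset_t addr, vm_size_t len, int flags)
{
	return (vm_map_sync(map, trunc_page(addr), round_page(addr + len),
	    (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0));
}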
3888 * Make the region specified by this entry pageable.
3894 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_unwire() argument
3899 KASSERT(entry->wired_count > 0, in vm_map_entry_unwire()
3900 ("vm_map_entry_unwire: entry %p isn't wired", entry)); in vm_map_entry_unwire()
3902 size = entry->end - entry->start; in vm_map_entry_unwire()
3903 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) in vm_map_entry_unwire()
3905 pmap_unwire(map->pmap, entry->start, entry->end); in vm_map_entry_unwire()
3906 vm_object_unwire(entry->object.vm_object, entry->offset, size, in vm_map_entry_unwire()
3908 entry->wired_count = 0; in vm_map_entry_unwire()
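/*
 * Editor's sketch: the public counterpart of the helper above.  A
 * munlock(2)-style caller unwires through vm_map_unwire(), which reaches
 * vm_map_entry_unwire() for each wired entry in the range; the wrapper
 * name is illustrative only.
 */
static int
example_munlock_range(struct proc *p, vm_offset_t addr, vm_size_t len)
{
	return (vm_map_unwire(&p->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}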
3912 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) in vm_map_entry_deallocate() argument
3915 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) in vm_map_entry_deallocate()
3916 vm_object_deallocate(entry->object.vm_object); in vm_map_entry_deallocate()
3917 uma_zfree(system_map ? kmapentzone : mapentzone, entry); in vm_map_entry_deallocate()
3923 * Deallocate the given entry from the target map.
3926 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_delete() argument
3932 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); in vm_map_entry_delete()
3933 object = entry->object.vm_object; in vm_map_entry_delete()
3935 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_entry_delete()
3936 MPASS(entry->cred == NULL); in vm_map_entry_delete()
3937 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); in vm_map_entry_delete()
3939 vm_map_entry_deallocate(entry, vm_map_is_system(map)); in vm_map_entry_delete()
3943 size = entry->end - entry->start; in vm_map_entry_delete()
3944 map->size -= size; in vm_map_entry_delete()
3946 if (entry->cred != NULL) { in vm_map_entry_delete()
3947 swap_release_by_cred(size, entry->cred); in vm_map_entry_delete()
3948 crfree(entry->cred); in vm_map_entry_delete()
3951 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { in vm_map_entry_delete()
3952 entry->object.vm_object = NULL; in vm_map_entry_delete()
3953 } else if ((object->flags & OBJ_ANON) != 0 || in vm_map_entry_delete()
3955 KASSERT(entry->cred == NULL || object->cred == NULL || in vm_map_entry_delete()
3956 (entry->eflags & MAP_ENTRY_NEEDS_COPY), in vm_map_entry_delete()
3957 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); in vm_map_entry_delete()
3958 offidxstart = OFF_TO_IDX(entry->offset); in vm_map_entry_delete()
3961 if (object->ref_count != 1 && in vm_map_entry_delete()
3962 ((object->flags & OBJ_ONEMAPPING) != 0 || in vm_map_entry_delete()
3974 if (offidxend >= object->size && in vm_map_entry_delete()
3975 offidxstart < object->size) { in vm_map_entry_delete()
3976 size1 = object->size; in vm_map_entry_delete()
3977 object->size = offidxstart; in vm_map_entry_delete()
3978 if (object->cred != NULL) { in vm_map_entry_delete()
3979 size1 -= object->size; in vm_map_entry_delete()
3980 KASSERT(object->charge >= ptoa(size1), in vm_map_entry_delete()
3983 object->cred); in vm_map_entry_delete()
3984 object->charge -= ptoa(size1); in vm_map_entry_delete()
3991 vm_map_entry_deallocate(entry, TRUE); in vm_map_entry_delete()
3993 entry->defer_next = curthread->td_map_def_user; in vm_map_entry_delete()
3994 curthread->td_map_def_user = entry; in vm_map_entry_delete()
4001 * Deallocates the given address range from the target
4007 vm_map_entry_t entry, next_entry, scratch_entry; in vm_map_delete() local
4019 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry); in vm_map_delete()
4022 for (; entry->start < end; entry = next_entry) { in vm_map_delete()
4024 * Wait for wiring or unwiring of an entry to complete. in vm_map_delete()
4028 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || in vm_map_delete()
4030 vm_map_entry_system_wired_count(entry) != 0)) { in vm_map_delete()
4034 saved_start = entry->start; in vm_map_delete()
4035 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_delete()
4036 last_timestamp = map->timestamp; in vm_map_delete()
4039 if (last_timestamp + 1 != map->timestamp) { in vm_map_delete()
4041 * Look again for the entry because the map was in vm_map_delete()
4043 * Specifically, the entry may have been in vm_map_delete()
4051 next_entry = entry; in vm_map_delete()
4056 rv = vm_map_clip_end(map, entry, end); in vm_map_delete()
4059 next_entry = vm_map_entry_succ(entry); in vm_map_delete()
4065 if (entry->wired_count != 0) in vm_map_delete()
4066 vm_map_entry_unwire(map, entry); in vm_map_delete()
4073 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in vm_map_delete()
4074 entry->object.vm_object != NULL) in vm_map_delete()
4075 pmap_map_delete(map->pmap, entry->start, entry->end); in vm_map_delete()
4078 * Delete the entry only after removing all pmap in vm_map_delete()
4083 vm_map_entry_delete(map, entry); in vm_map_delete()
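/*
 * Editor's sketch: vm_map_delete() assumes the map is already locked.  A
 * caller that does not hold the lock brackets it roughly the way
 * vm_map_remove(), whose comment follows, does.
 */
static int
example_remove_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int rv;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	rv = vm_map_delete(map, start, end);
	vm_map_unlock(map);
	return (rv);
}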
4091 * Remove the given address range from the target map.
4110 * entire address region given. The entire region must be allocated.
4114 * might be mapped into a larger address space.
4124 vm_map_entry_t entry; in vm_map_check_protection() local
4129 entry = tmp_entry; in vm_map_check_protection()
4135 if (start < entry->start) in vm_map_check_protection()
4138 * Check protection associated with entry. in vm_map_check_protection()
4140 if ((entry->protection & protection) != protection) in vm_map_check_protection()
4142 /* go to next entry */ in vm_map_check_protection()
4143 start = entry->end; in vm_map_check_protection()
4144 entry = vm_map_entry_succ(entry); in vm_map_check_protection()
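/*
 * Editor's sketch: a typical use of vm_map_check_protection().  The
 * routine only walks map entries, so the caller provides the map lock and
 * a read lock suffices.  The wrapper name is illustrative only.
 */
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, trunc_page(addr),
	    round_page(addr + len), VM_PROT_READ);
	vm_map_unlock_read(map);
	return (ok);
}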
4153 * Copies a swap-backed object from an existing map entry to a
4165 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4167 if ((src_object->flags & OBJ_ANON) != 0) { in vm_map_copy_swap_object()
4170 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { in vm_map_copy_swap_object()
4172 src_object = src_entry->object.vm_object; in vm_map_copy_swap_object()
4179 if (src_entry->cred != NULL && in vm_map_copy_swap_object()
4180 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4181 KASSERT(src_object->cred == NULL, in vm_map_copy_swap_object()
4184 src_object->cred = src_entry->cred; in vm_map_copy_swap_object()
4185 src_object->charge = size; in vm_map_copy_swap_object()
4187 dst_entry->object.vm_object = src_object; in vm_map_copy_swap_object()
4189 cred = curthread->td_ucred; in vm_map_copy_swap_object()
4191 dst_entry->cred = cred; in vm_map_copy_swap_object()
4193 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { in vm_map_copy_swap_object()
4195 src_entry->cred = cred; in vm_map_copy_swap_object()
4204 * Copies the contents of the source entry to the destination
4205 * entry. The entries *must* be aligned properly.
4221 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) in vm_map_copy_entry()
4224 if (src_entry->wired_count == 0 || in vm_map_copy_entry()
4225 (src_entry->protection & VM_PROT_WRITE) == 0) { in vm_map_copy_entry()
4227 * If the source entry is marked needs_copy, it is already in vm_map_copy_entry()
4228 * write-protected. in vm_map_copy_entry()
4230 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && in vm_map_copy_entry()
4231 (src_entry->protection & VM_PROT_WRITE) != 0) { in vm_map_copy_entry()
4232 pmap_protect(src_map->pmap, in vm_map_copy_entry()
4233 src_entry->start, in vm_map_copy_entry()
4234 src_entry->end, in vm_map_copy_entry()
4235 src_entry->protection & ~VM_PROT_WRITE); in vm_map_copy_entry()
4241 size = src_entry->end - src_entry->start; in vm_map_copy_entry()
4242 if ((src_object = src_entry->object.vm_object) != NULL) { in vm_map_copy_entry()
4243 if ((src_object->flags & OBJ_SWAP) != 0) { in vm_map_copy_entry()
4247 src_object = src_entry->object.vm_object; in vm_map_copy_entry()
4250 dst_entry->object.vm_object = src_object; in vm_map_copy_entry()
4252 src_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4254 dst_entry->eflags |= MAP_ENTRY_COW | in vm_map_copy_entry()
4256 dst_entry->offset = src_entry->offset; in vm_map_copy_entry()
4257 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { in vm_map_copy_entry()
4261 * src_entry, since the entry is in vm_map_copy_entry()
4263 * fake entry that is used to in vm_map_copy_entry()
4264 * decrement object->un_pager writecount in vm_map_copy_entry()
4269 fake_entry->eflags = MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4270 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; in vm_map_copy_entry()
4272 fake_entry->object.vm_object = src_object; in vm_map_copy_entry()
4273 fake_entry->start = src_entry->start; in vm_map_copy_entry()
4274 fake_entry->end = src_entry->end; in vm_map_copy_entry()
4275 fake_entry->defer_next = in vm_map_copy_entry()
4276 curthread->td_map_def_user; in vm_map_copy_entry()
4277 curthread->td_map_def_user = fake_entry; in vm_map_copy_entry()
4280 pmap_copy(dst_map->pmap, src_map->pmap, in vm_map_copy_entry()
4281 dst_entry->start, dst_entry->end - dst_entry->start, in vm_map_copy_entry()
4282 src_entry->start); in vm_map_copy_entry()
4284 dst_entry->object.vm_object = NULL; in vm_map_copy_entry()
4285 if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_copy_entry()
4286 dst_entry->offset = 0; in vm_map_copy_entry()
4287 if (src_entry->cred != NULL) { in vm_map_copy_entry()
4288 dst_entry->cred = curthread->td_ucred; in vm_map_copy_entry()
4289 crhold(dst_entry->cred); in vm_map_copy_entry()
4295 * We don't want to make writeable wired pages copy-on-write. in vm_map_copy_entry()
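/*
 * Editor's illustration (userspace, not kernel code): the observable
 * effect of the COW setup above.  After fork(2), parent and child share
 * the anonymous page read-only until the child's store forces a private
 * copy, so the parent's value is unchanged.
 */
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	int *p;

	p = malloc(sizeof(*p));
	*p = 1;
	if (fork() == 0) {
		*p = 2;			/* copy-on-write fault in the child */
		_exit(0);
	}
	wait(NULL);
	printf("%d\n", *p);		/* the parent still prints 1 */
	return (0);
}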
4306 * Update the newly-forked vmspace each time a map entry is inherited
4308 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4312 vm_map_entry_t entry) in vmspace_map_entry_forked() argument
4317 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) in vmspace_map_entry_forked()
4319 entrysize = entry->end - entry->start; in vmspace_map_entry_forked()
4320 vm2->vm_map.size += entrysize; in vmspace_map_entry_forked()
4321 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) { in vmspace_map_entry_forked()
4322 vm2->vm_ssize += btoc(entrysize); in vmspace_map_entry_forked()
4323 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && in vmspace_map_entry_forked()
4324 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { in vmspace_map_entry_forked()
4325 newend = MIN(entry->end, in vmspace_map_entry_forked()
4326 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); in vmspace_map_entry_forked()
4327 vm2->vm_dsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4328 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && in vmspace_map_entry_forked()
4329 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { in vmspace_map_entry_forked()
4330 newend = MIN(entry->end, in vmspace_map_entry_forked()
4331 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); in vmspace_map_entry_forked()
4332 vm2->vm_tsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4357 old_map = &vm1->vm_map; in vmspace_fork()
4364 vm2->vm_taddr = vm1->vm_taddr; in vmspace_fork()
4365 vm2->vm_daddr = vm1->vm_daddr; in vmspace_fork()
4366 vm2->vm_maxsaddr = vm1->vm_maxsaddr; in vmspace_fork()
4367 vm2->vm_stacktop = vm1->vm_stacktop; in vmspace_fork()
4368 vm2->vm_shp_base = vm1->vm_shp_base; in vmspace_fork()
4370 if (old_map->busy) in vmspace_fork()
4372 new_map = &vm2->vm_map; in vmspace_fork()
4376 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap); in vmspace_fork()
4378 sx_xunlock(&old_map->lock); in vmspace_fork()
4379 sx_xunlock(&new_map->lock); in vmspace_fork()
4385 new_map->anon_loc = old_map->anon_loc; in vmspace_fork()
4386 new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART | in vmspace_fork()
4390 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vmspace_fork()
4393 inh = old_entry->inheritance; in vmspace_fork()
4394 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 && in vmspace_fork()
4404 * Clone the entry, creating the shared object if in vmspace_fork()
4407 object = old_entry->object.vm_object; in vmspace_fork()
4410 object = old_entry->object.vm_object; in vmspace_fork()
4418 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vmspace_fork()
4419 vm_object_shadow(&old_entry->object.vm_object, in vmspace_fork()
4420 &old_entry->offset, in vmspace_fork()
4421 old_entry->end - old_entry->start, in vmspace_fork()
4422 old_entry->cred, in vmspace_fork()
4425 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vmspace_fork()
4426 old_entry->cred = NULL; in vmspace_fork()
4434 object = old_entry->object.vm_object; in vmspace_fork()
4438 if (old_entry->cred != NULL) { in vmspace_fork()
4439 KASSERT(object->cred == NULL, in vmspace_fork()
4441 object->cred = old_entry->cred; in vmspace_fork()
4442 object->charge = old_entry->end - in vmspace_fork()
4443 old_entry->start; in vmspace_fork()
4444 old_entry->cred = NULL; in vmspace_fork()
4453 if (old_entry->eflags & MAP_ENTRY_WRITECNT && in vmspace_fork()
4454 object->type == OBJT_VNODE) { in vmspace_fork()
4455 KASSERT(((struct vnode *)object-> in vmspace_fork()
4456 handle)->v_writecount > 0, in vmspace_fork()
4459 KASSERT(object->un_pager.vnp. in vmspace_fork()
4468 * Clone the entry, referencing the shared object. in vmspace_fork()
4472 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4474 new_entry->wiring_thread = NULL; in vmspace_fork()
4475 new_entry->wired_count = 0; in vmspace_fork()
4476 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { in vmspace_fork()
4478 new_entry->start, new_entry->end); in vmspace_fork()
4483 * Insert the entry into the new map -- we know we're in vmspace_fork()
4492 pmap_copy(new_map->pmap, old_map->pmap, in vmspace_fork()
4493 new_entry->start, in vmspace_fork()
4494 (old_entry->end - old_entry->start), in vmspace_fork()
4495 old_entry->start); in vmspace_fork()
4500 * Clone the entry and link into the map. in vmspace_fork()
4505 * Copied entry is COW over the old object. in vmspace_fork()
4507 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | in vmspace_fork()
4509 new_entry->wiring_thread = NULL; in vmspace_fork()
4510 new_entry->wired_count = 0; in vmspace_fork()
4511 new_entry->object.vm_object = NULL; in vmspace_fork()
4512 new_entry->cred = NULL; in vmspace_fork()
4522 * Create a new anonymous mapping entry modelled from in vmspace_fork()
4528 new_entry->start = old_entry->start; in vmspace_fork()
4529 new_entry->end = old_entry->end; in vmspace_fork()
4530 new_entry->eflags = old_entry->eflags & in vmspace_fork()
4534 new_entry->protection = old_entry->protection; in vmspace_fork()
4535 new_entry->max_protection = old_entry->max_protection; in vmspace_fork()
4536 new_entry->inheritance = VM_INHERIT_ZERO; in vmspace_fork()
4541 new_entry->cred = curthread->td_ucred; in vmspace_fork()
4542 crhold(new_entry->cred); in vmspace_fork()
4543 *fork_charge += (new_entry->end - new_entry->start); in vmspace_fork()
4553 sx_xunlock(&old_map->lock); in vmspace_fork()
4554 sx_xunlock(&new_map->lock); in vmspace_fork()
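/*
 * Editor's illustration (userspace): minherit(2) selects which of the
 * vmspace_fork() cases above applies to a mapping.  INHERIT_ZERO
 * corresponds to VM_INHERIT_ZERO, so the child sees zero-filled pages
 * instead of a copy of the parent's data.
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	char *buf;

	buf = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED)
		exit(1);
	buf[0] = 'x';
	if (minherit(buf, getpagesize(), INHERIT_ZERO) != 0)
		exit(1);
	if (fork() == 0)
		_exit(buf[0] == '\0' ? 0 : 1);	/* child sees zeroes */
	wait(NULL);
	return (buf[0] == 'x' ? 0 : 1);		/* parent is unaffected */
}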
4572 MPASS((map->flags & MAP_WIREFUTURE) == 0); in vm_map_stack()
4578 if (map->size + init_ssize > vmemlim) { in vm_map_stack()
4609 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_stack_locked()
4610 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_stack_locked()
4617 init_ssize = max_ssize - sgp; in vm_map_stack_locked()
4626 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) in vm_map_stack_locked()
4637 bot = addrbos + max_ssize - init_ssize; in vm_map_stack_locked()
4645 KASSERT(new_entry->end == top || new_entry->start == bot, in vm_map_stack_locked()
4646 ("Bad entry start/end for new stack entry")); in vm_map_stack_locked()
4647 KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, in vm_map_stack_locked()
4648 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); in vm_map_stack_locked()
4655 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0, in vm_map_stack_locked()
4656 ("entry %p not gap %#x", gap_entry, gap_entry->eflags)); in vm_map_stack_locked()
4657 KASSERT((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0, in vm_map_stack_locked()
4658 ("entry %p not stack gap %#x", gap_entry, in vm_map_stack_locked()
4659 gap_entry->eflags)); in vm_map_stack_locked()
4663 * read-ahead logic is never used for it. Re-use in vm_map_stack_locked()
4664 * next_read of the gap entry to store in vm_map_stack_locked()
4670 gap_entry->next_read = sgp; in vm_map_stack_locked()
4671 gap_entry->offset = prot | PROT_MAX(max); in vm_map_stack_locked()
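/*
 * Editor's worked example of the layout set up above (the numbers are
 * assumptions, not values taken from this file): with 4 KB pages, a 1 MB
 * max_ssize, a 128 KB initial stack and a 4-page (16 KB) gap sgp:
 *
 *	addrbos                               bot                  top
 *	|<------------- gap entry ------------>|<--- init_ssize --->|
 *	|<------------------------ max_ssize ----------------------->|
 *
 * Only [bot, top) is mapped initially.  The gap entry remembers the stack
 * protection in ->offset and sgp in ->next_read, so that
 * vm_map_growstack() can later convert gap pages into stack pages while
 * keeping the lowest sgp bytes unmapped as the guard.
 */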
4679 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we
4702 vm = p->p_vmspace; in vm_map_growstack()
4709 if (p != initproc && (map != &p->p_vmspace->vm_map || in vm_map_growstack()
4710 p->p_textvp == NULL)) in vm_map_growstack()
4722 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_growstack()
4724 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP) != 0) { in vm_map_growstack()
4726 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || in vm_map_growstack()
4727 stack_entry->start != gap_entry->end) in vm_map_growstack()
4729 grow_amount = round_page(stack_entry->start - addr); in vm_map_growstack()
4733 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || in vm_map_growstack()
4734 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : in vm_map_growstack()
4735 gap_entry->next_read; in vm_map_growstack()
4736 max_grow = gap_entry->end - gap_entry->start; in vm_map_growstack()
4739 max_grow -= guard; in vm_map_growstack()
4747 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && in vm_map_growstack()
4748 addr < (vm_offset_t)vm->vm_stacktop; in vm_map_growstack()
4749 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) in vm_map_growstack()
4756 ctob(vm->vm_ssize) + grow_amount)) { in vm_map_growstack()
4767 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { in vm_map_growstack()
4768 grow_amount = trunc_page((vm_size_t)stacklim) - in vm_map_growstack()
4769 ctob(vm->vm_ssize); in vm_map_growstack()
4776 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) in vm_map_growstack()
4777 grow_amount = limit - ctob(vm->vm_ssize); in vm_map_growstack()
4780 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4781 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { in vm_map_growstack()
4789 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { in vm_map_growstack()
4800 if (map->size + grow_amount > vmemlim) { in vm_map_growstack()
4807 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { in vm_map_growstack()
4826 prot = PROT_EXTRACT(gap_entry->offset); in vm_map_growstack()
4827 max = PROT_MAX_EXTRACT(gap_entry->offset); in vm_map_growstack()
4828 sgp = gap_entry->next_read; in vm_map_growstack()
4830 grow_start = gap_entry->end - grow_amount; in vm_map_growstack()
4831 if (gap_entry->start + grow_amount == gap_entry->end) { in vm_map_growstack()
4832 gap_start = gap_entry->start; in vm_map_growstack()
4833 gap_end = gap_entry->end; in vm_map_growstack()
4837 MPASS(gap_entry->start < gap_entry->end - grow_amount); in vm_map_growstack()
4838 vm_map_entry_resize(map, gap_entry, -grow_amount); in vm_map_growstack()
4850 gap_entry->next_read = sgp; in vm_map_growstack()
4851 gap_entry->offset = prot | PROT_MAX(max); in vm_map_growstack()
4858 vm->vm_ssize += btoc(grow_amount); in vm_map_growstack()
4863 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) { in vm_map_growstack()
4874 error = racct_set(p, RACCT_VMEM, map->size); in vm_map_growstack()
4878 ptoa(pmap_wired_count(map->pmap))); in vm_map_growstack()
4881 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); in vm_map_growstack()
4897 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_exec()
4900 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0, in vmspace_exec()
4905 newvmspace->vm_swrss = oldvmspace->vm_swrss; in vmspace_exec()
4914 p->p_vmspace = newvmspace; in vmspace_exec()
4916 if (p == curthread->td_proc) in vmspace_exec()
4918 curthread->td_pflags |= TDP_EXECVMSPC; in vmspace_exec()
4929 struct vmspace *oldvmspace = p->p_vmspace; in vmspace_unshare()
4935 * cannot concurrently transition 1 -> 2. in vmspace_unshare()
4937 if (refcount_load(&oldvmspace->vm_refcnt) == 1) in vmspace_unshare()
4943 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { in vmspace_unshare()
4948 p->p_vmspace = newvmspace; in vmspace_unshare()
4950 if (p == curthread->td_proc) in vmspace_unshare()
4960 * protection for a given virtual address in the
4988 vm_map_entry_t entry; in vm_map_lookup() local
5002 * Lookup the faulting address. in vm_map_lookup()
5009 entry = *out_entry; in vm_map_lookup()
5014 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_lookup()
5017 *var_map = map = entry->object.sub_map; in vm_map_lookup()
5025 prot = entry->protection; in vm_map_lookup()
5029 (entry->eflags & MAP_ENTRY_GUARD) != 0 && in vm_map_lookup()
5030 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 && in vm_map_lookup()
5031 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) in vm_map_lookup()
5039 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & in vm_map_lookup()
5042 ("entry %p flags %x", entry, entry->eflags)); in vm_map_lookup()
5044 (entry->max_protection & VM_PROT_WRITE) == 0 && in vm_map_lookup()
5045 (entry->eflags & MAP_ENTRY_COW) == 0) { in vm_map_lookup()
5054 *wired = (entry->wired_count != 0); in vm_map_lookup()
5056 fault_type = entry->protection; in vm_map_lookup()
5057 size = entry->end - entry->start; in vm_map_lookup()
5060 * If the entry was copy-on-write, we either ... in vm_map_lookup()
5062 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup()
5075 * -- one just moved from the map to the new in vm_map_lookup()
5081 if (entry->cred == NULL) { in vm_map_lookup()
5086 cred = curthread->td_ucred; in vm_map_lookup()
5093 entry->cred = cred; in vm_map_lookup()
5095 eobject = entry->object.vm_object; in vm_map_lookup()
5096 vm_object_shadow(&entry->object.vm_object, in vm_map_lookup()
5097 &entry->offset, size, entry->cred, false); in vm_map_lookup()
5098 if (eobject == entry->object.vm_object) { in vm_map_lookup()
5102 swap_release_by_cred(size, entry->cred); in vm_map_lookup()
5103 crfree(entry->cred); in vm_map_lookup()
5105 entry->cred = NULL; in vm_map_lookup()
5106 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vm_map_lookup()
5111 * We're attempting to read a copy-on-write page -- in vm_map_lookup()
5121 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) { in vm_map_lookup()
5124 entry->object.vm_object = vm_object_allocate_anon(atop(size), in vm_map_lookup()
5125 NULL, entry->cred, size); in vm_map_lookup()
5126 entry->offset = 0; in vm_map_lookup()
5127 entry->cred = NULL; in vm_map_lookup()
5132 * Return the object/offset from this entry. If the entry was in vm_map_lookup()
5133 * copy-on-write or empty, it has been fixed up. in vm_map_lookup()
5135 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup()
5136 *object = entry->object.vm_object; in vm_map_lookup()
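/*
 * Editor's sketch: the usual pairing of vm_map_lookup() with
 * vm_map_lookup_done(), as the page-fault handler uses it.  On success
 * the map is returned read-locked and vm_map_lookup_done() releases it;
 * the wrapper name is illustrative only.
 */
static int
example_resolve_fault_address(vm_map_t map, vm_offset_t vaddr,
    vm_prot_t fault_type)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... page in (object, pindex) while the map remains locked ... */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}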
5145 * Lookup the faulting address. A version of vm_map_lookup that returns
5158 vm_map_entry_t entry; in vm_map_lookup_locked() local
5164 * Lookup the faulting address. in vm_map_lookup_locked()
5169 entry = *out_entry; in vm_map_lookup_locked()
5172 * Fail if the entry refers to a submap. in vm_map_lookup_locked()
5174 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) in vm_map_lookup_locked()
5180 prot = entry->protection; in vm_map_lookup_locked()
5189 *wired = (entry->wired_count != 0); in vm_map_lookup_locked()
5191 fault_type = entry->protection; in vm_map_lookup_locked()
5193 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup_locked()
5195 * Fail if the entry was copy-on-write for a write fault. in vm_map_lookup_locked()
5200 * We're attempting to read a copy-on-write page -- in vm_map_lookup_locked()
5209 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) in vm_map_lookup_locked()
5213 * Return the object/offset from this entry. If the entry was in vm_map_lookup_locked()
5214 * copy-on-write or empty, it has been fixed up. in vm_map_lookup_locked()
5216 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup_locked()
5217 *object = entry->object.vm_object; in vm_map_lookup_locked()
5230 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) in vm_map_lookup_done() argument
5233 * Unlock the main-level map in vm_map_lookup_done()
5256 return (map->pmap); in vm_map_pmap_KBI()
5270 vm_map_entry_t entry, prev; in _vm_map_assert_consistent() local
5275 ++map->nupdates; in _vm_map_assert_consistent()
5280 header = prev = &map->header; in _vm_map_assert_consistent()
5281 VM_MAP_ENTRY_FOREACH(entry, map) { in _vm_map_assert_consistent()
5282 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5283 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5284 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5285 KASSERT(entry->start < entry->end, in _vm_map_assert_consistent()
5287 (uintmax_t)entry->start, (uintmax_t)entry->end)); in _vm_map_assert_consistent()
5288 KASSERT(entry->left == header || in _vm_map_assert_consistent()
5289 entry->left->start < entry->start, in _vm_map_assert_consistent()
5290 ("map %p left->start = %jx, start = %jx", map, in _vm_map_assert_consistent()
5291 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5292 KASSERT(entry->right == header || in _vm_map_assert_consistent()
5293 entry->start < entry->right->start, in _vm_map_assert_consistent()
5294 ("map %p start = %jx, right->start = %jx", map, in _vm_map_assert_consistent()
5295 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); in _vm_map_assert_consistent()
5296 cur = map->root; in _vm_map_assert_consistent()
5299 if (entry->start < cur->start) { in _vm_map_assert_consistent()
5301 cur = cur->left; in _vm_map_assert_consistent()
5304 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5305 } else if (cur->end <= entry->start) { in _vm_map_assert_consistent()
5307 cur = cur->right; in _vm_map_assert_consistent()
5310 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5312 KASSERT(cur == entry, in _vm_map_assert_consistent()
5314 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5318 max_left = vm_map_entry_max_free_left(entry, lbound); in _vm_map_assert_consistent()
5319 max_right = vm_map_entry_max_free_right(entry, ubound); in _vm_map_assert_consistent()
5320 KASSERT(entry->max_free == vm_size_max(max_left, max_right), in _vm_map_assert_consistent()
5322 (uintmax_t)entry->max_free, in _vm_map_assert_consistent()
5324 prev = entry; in _vm_map_assert_consistent()
5326 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5327 ("map %p prev->end = %jx, start = %jx", map, in _vm_map_assert_consistent()
5328 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5341 vm_map_entry_t entry, prev; in vm_map_print() local
5345 (void *)map->pmap, map->nentries, map->timestamp); in vm_map_print()
5348 prev = &map->header; in vm_map_print()
5349 VM_MAP_ENTRY_FOREACH(entry, map) { in vm_map_print()
5350 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", in vm_map_print()
5351 (void *)entry, (void *)entry->start, (void *)entry->end, in vm_map_print()
5352 entry->eflags); in vm_map_print()
5358 entry->protection, in vm_map_print()
5359 entry->max_protection, in vm_map_print()
5361 entry->inheritance]); in vm_map_print()
5362 if (entry->wired_count != 0) in vm_map_print()
5365 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_print()
5367 (void *)entry->object.sub_map, in vm_map_print()
5368 (uintmax_t)entry->offset); in vm_map_print()
5369 if (prev == &map->header || in vm_map_print()
5370 prev->object.sub_map != in vm_map_print()
5371 entry->object.sub_map) { in vm_map_print()
5373 vm_map_print((vm_map_t)entry->object.sub_map); in vm_map_print()
5374 db_indent -= 2; in vm_map_print()
5377 if (entry->cred != NULL) in vm_map_print()
5378 db_printf(", ruid %d", entry->cred->cr_ruid); in vm_map_print()
5380 (void *)entry->object.vm_object, in vm_map_print()
5381 (uintmax_t)entry->offset); in vm_map_print()
5382 if (entry->object.vm_object && entry->object.vm_object->cred) in vm_map_print()
5384 entry->object.vm_object->cred->cr_ruid, in vm_map_print()
5385 (uintmax_t)entry->object.vm_object->charge); in vm_map_print()
5386 if (entry->eflags & MAP_ENTRY_COW) in vm_map_print()
5388 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); in vm_map_print()
5391 if (prev == &map->header || in vm_map_print()
5392 prev->object.vm_object != in vm_map_print()
5393 entry->object.vm_object) { in vm_map_print()
5396 entry->object.vm_object, in vm_map_print()
5398 db_indent -= 2; in vm_map_print()
5401 prev = entry; in vm_map_print()
5403 db_indent -= 2; in vm_map_print()
5427 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, in DB_SHOW_COMMAND()
5428 (void *)vmspace_pmap(p->p_vmspace)); in DB_SHOW_COMMAND()
5430 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); in DB_SHOW_COMMAND()
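/*
 * Editor's note: the printers above back the kernel debugger commands
 * "show map <addr>" and "show procvm", which is how this output is
 * normally reached from DDB.
 */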