Lines Matching full:entry

113  *	[That is, an entry is split into two, bordering at a
130 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
131 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
132 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
143 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
250 * User map and entry structures are allocated from the general purpose
261 * Disable the use of per-CPU buckets: map entry allocation is
264 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
268 /* Reserve an extra map entry for use when replenishing the reserve. */
275 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
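
The fragments above (lines 250-275) are from zone setup: kernel-map entries are carved from the "KMAP ENTRY" zone (kmapentzone), which keeps a small reserve so the allocator can replenish itself, while user-map entries come from "MAP ENTRY" (mapentzone). A hypothetical sketch of how an allocation might pick a zone, mirroring the dispose path at line 927 below; the helper name and the exact flags are illustrative assumptions, not the file's code:

    static vm_map_entry_t
    example_entry_alloc(vm_map_t map)
    {
    	vm_map_entry_t new_entry;

    	if (vm_map_is_system(map))
    		/* kernel maps: cannot sleep; may dip into the zone reserve */
    		new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_USE_RESERVE);
    	else
    		/* user maps: ordinary sleepable allocation */
    		new_entry = uma_zalloc(mapentzone, M_WAITOK);
    	return (new_entry);
    }
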
534 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
540 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
542 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
544 object = entry->object.vm_object;
545 KASSERT(object != NULL, ("No object for text, entry %p", entry));
551 KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
552 entry, entry->object.vm_object));
556 * referenced by the entry we are processing, so it cannot go
583 vm_map_entry_t entry, next;
587 entry = td->td_map_def_user;
589 while (entry != NULL) {
590 next = entry->defer_next;
591 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
594 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
599 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
601 object = entry->object.vm_object;
603 vm_pager_release_writecount(object, entry->start,
604 entry->end);
606 vm_map_entry_set_vnode_text(entry, false);
607 vm_map_entry_deallocate(entry, FALSE);
608 entry = next;
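
Lines 583-608 above are the drain loop for the per-thread deferred-deallocation list; entries are queued onto it via defer_next at lines 3990-3991 below. A skeleton of that walk, with the writecount and executable-text handling elided; detaching the list head before walking is an assumption:

    struct thread *td = curthread;
    vm_map_entry_t entry, next;

    entry = td->td_map_def_user;
    td->td_map_def_user = NULL;	/* assumed: detach the list before walking it */
    while (entry != NULL) {
    	next = entry->defer_next;
    	/* release object writecounts / text references here (see lines 594-606) */
    	vm_map_entry_deallocate(entry, FALSE);
    	entry = next;
    }
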
925 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
927 uma_zfree(vm_map_is_system(map) ? kmapentzone : mapentzone, entry);
933 * Allocates a VM map entry for insertion.
934 * No entry fields are filled in.
949 * map entry, dipping into the reserve if necessary, and set a
978 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
980 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
1010 * Find the {predecessor, successor} of the entry by taking one step
1015 vm_map_entry_pred(vm_map_entry_t entry)
1019 prior = entry->left;
1020 if (prior->right->start < entry->start) {
1023 while (prior->right != entry);
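
Read together, the fragments at lines 1015-1023 outline the tree predecessor walk: step to the left child, then follow right links until the child that points back at the entry is reached (the header acts as a sentinel, so no NULL checks are needed). A hedged reconstruction; the do/while body and the return statement are filled in as assumptions, since those lines do not match the search:

    static vm_map_entry_t
    vm_map_entry_pred(vm_map_entry_t entry)
    {
    	vm_map_entry_t prior;

    	prior = entry->left;
    	if (prior->right->start < entry->start) {
    		do
    			prior = prior->right;	/* assumed body */
    		while (prior->right != entry);
    	}
    	return (prior);			/* assumed */
    }
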
1326 * is the predecessor of the first map entry, and the successor of the
1330 * adjacent entry (lower if possible) if addr is not in the tree.
1378 * Insert/remove entries from maps. On linking, if new entry clips
1379 * existing entry, trim existing entry to avoid overlap, and manage
1380 * offsets. On unlinking, merge disappearing entry with neighbor, if
1385 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1391 "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1392 map->nentries, entry);
1396 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1399 * The new entry does not overlap any existing entry in the
1402 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1403 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1404 } else if (entry->start == root->start) {
1406 * The new entry is a clone of root, with only the end field
1407 * changed. The root entry will be shrunk to abut the new
1408 * entry, and will be the right child of the new root entry in
1411 KASSERT(entry->end < root->end,
1412 ("%s: clip_start not within entry", __func__));
1415 root->offset += entry->end - root->start;
1416 root->start = entry->end;
1417 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1419 vm_map_splay_merge_pred(entry, root, entry),
1423 * The new entry is a clone of root, with only the start field
1424 * changed. The root entry will be shrunk to abut the new
1425 * entry, and will be the left child of the new root entry in
1428 KASSERT(entry->end == root->end,
1429 ("%s: clip_start not within entry", __func__));
1431 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
1432 entry->offset += entry->start - root->start;
1433 root->end = entry->start;
1436 vm_map_splay_merge_succ(entry, root, entry));
1437 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1439 entry->max_free = vm_size_max(max_free_left, max_free_right);
1440 map->root = entry;
1450 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1458 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1488 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1489 map->nentries, entry);
1501 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1507 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1510 entry->end += grow_amount;
1516 CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1517 __func__, map, map->nentries, entry);
1523 * Finds the map entry containing (or
1525 * in the given map; the entry is returned
1526 * in the "entry" parameter. The boolean
1534 vm_map_entry_t *entry) /* OUT */
1540 * If the map is empty, then the map entry immediately preceding
1546 *entry = header;
1550 *entry = cur;
1568 * If "address" is contained within a map entry, the new root
1569 * is that map entry. Otherwise, the new root is a map entry
1573 *entry = header;
1576 *entry = cur;
1596 *entry = cur;
1600 *entry = lbound;
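
A hypothetical caller sketch (not a line from the listing) of the usual pattern around vm_map_lookup_entry(): when the address falls inside an entry the function returns TRUE and *entry points at it; otherwise *entry is the preceding entry (or the header) and callers typically step to its successor, as at lines 3133-3134 below. The map and addr variables are assumed to be supplied by the caller:

    vm_map_entry_t entry;

    vm_map_lock_read(map);
    if (vm_map_lookup_entry(map, addr, &entry)) {
    	/* addr lies within [entry->start, entry->end) */
    } else {
    	/* addr falls in a hole; entry is the entry before it */
    	entry = vm_map_entry_succ(entry);
    }
    vm_map_unlock_read(map);
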
1606 * returns the newly inserted map entry in '*res'. In case the new
1607 * entry is coalesced with a neighbor or an existing entry was
1608 * resized, that entry is returned. In any case, the returned entry
1645 * Find the entry prior to the proposed starting address; if it's part
1646 * of an existing entry, this range is bogus.
1652 * Assert that the next entry doesn't overlap the end point.
1742 * can extend the previous map entry to include the
1763 * map entry, we have to create a new map entry. We
1781 * Create a new entry
1805 * Insert the new entry into the list
1812 * Try to coalesce the new entry with both the previous and next
1814 * with the previous entry when object is NULL. Here, we handle the
1837 * prior to making call to account for the new entry.
1856 * contiguous free space between an entry in its subtree and a
1857 * neighbor of that entry. This allows finding a free region in
1888 * After splay_split, if start is within an entry, push it to the start
2108 * prior to making call to account for the new entry.
2321 * A map entry with any of the following flags set must not be merged with
2322 * another entry.
2329 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2333 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2335 prev, entry));
2336 return (prev->end == entry->start &&
2337 prev->object.vm_object == entry->object.vm_object &&
2339 prev->offset + (prev->end - prev->start) == entry->offset) &&
2340 prev->eflags == entry->eflags &&
2341 prev->protection == entry->protection &&
2342 prev->max_protection == entry->max_protection &&
2343 prev->inheritance == entry->inheritance &&
2344 prev->wired_count == entry->wired_count &&
2345 prev->cred == entry->cred);
2349 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2360 * should not be adjusted when the entry is disposed of.
2362 if (entry->object.vm_object != NULL)
2363 vm_object_deallocate(entry->object.vm_object);
2364 if (entry->cred != NULL)
2365 crfree(entry->cred);
2366 vm_map_entry_dispose(map, entry);
2375 * the map entry that includes the first range.
2381 vm_map_entry_t entry)
2385 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2386 vm_map_mergeable_neighbors(prev_entry, entry)) {
2389 return (entry);
2397 * Allocate an object to back a map entry.
2400 vm_map_entry_back(vm_map_entry_t entry)
2404 KASSERT(entry->object.vm_object == NULL,
2405 ("map entry %p has backing object", entry));
2406 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2407 ("map entry %p is a submap", entry));
2408 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2409 entry->cred, entry->end - entry->start);
2410 entry->object.vm_object = object;
2411 entry->offset = 0;
2412 entry->cred = NULL;
2418 * If there is no object backing this entry, create one. Otherwise, if
2419 * the entry has cred, give it to the backing object.
2422 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2426 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2427 ("map entry %p is a submap", entry));
2428 if (entry->object.vm_object == NULL && !vm_map_is_system(map) &&
2429 (entry->eflags & MAP_ENTRY_GUARD) == 0)
2430 vm_map_entry_back(entry);
2431 else if (entry->object.vm_object != NULL &&
2432 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2433 entry->cred != NULL) {
2434 VM_OBJECT_WLOCK(entry->object.vm_object);
2435 KASSERT(entry->object.vm_object->cred == NULL,
2436 ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2437 entry->object.vm_object->cred = entry->cred;
2438 entry->object.vm_object->charge = entry->end - entry->start;
2439 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2440 entry->cred = NULL;
2447 * Create a duplicate map entry for clipping.
2450 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2458 * objects won't be created after the map entry is split.
2460 vm_map_entry_charge_object(map, entry);
2462 /* Clone the entry. */
2464 *new_entry = *entry;
2466 crhold(entry->cred);
2467 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2472 * MAP_ENTRY_WRITECNT type entry shall be kept as is here. The
2483 * Asserts that the given entry begins at or after
2485 * it splits the entry into two.
2488 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2495 "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2498 if (startaddr <= entry->start)
2502 KASSERT(entry->end > startaddr && entry->start < startaddr,
2503 ("%s: invalid clip of entry %p", __func__, entry));
2505 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
2511 new_entry = vm_map_entry_clone(map, entry);
2514 * Split off the front portion. Insert the new entry BEFORE this one,
2515 * so that this entry has the specified starting address.
2525 * Find the entry at or just after 'start', and clip it if 'start' is in
2526 * the interior of the entry. Return entry after 'start', and in
2527 * prev_entry set the entry before 'start'.
2533 vm_map_entry_t entry;
2542 entry = *prev_entry;
2543 rv = vm_map_clip_start(map, entry, start);
2546 *prev_entry = vm_map_entry_pred(entry);
2548 entry = vm_map_entry_succ(*prev_entry);
2549 *res_entry = entry;
2556 * Asserts that the given entry ends at or before
2558 * it splits the entry into two.
2561 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2568 "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2571 if (endaddr >= entry->end)
2575 KASSERT(entry->start < endaddr && entry->end > endaddr,
2576 ("%s: invalid clip of entry %p", __func__, entry));
2578 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
2584 new_entry = vm_map_entry_clone(map, entry);
2587 * Split off the back portion. Insert the new entry AFTER this one,
2588 * so that this entry has the specified ending address.
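
A hypothetical sketch (not a function from the file) of the clip-and-iterate pattern visible in vm_map_protect(), vm_map_madvise() and vm_map_inherit() below: the range [start, end) is clipped so it is covered by whole entries, each entry is updated, and neighbors are re-merged afterwards. The function name and the unlock/return placement are assumptions:

    static int
    example_range_op(vm_map_t map, vm_offset_t start, vm_offset_t end)
    {
    	vm_map_entry_t entry, prev_entry;
    	int rv;

    	vm_map_lock(map);
    	rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
    	if (rv != KERN_SUCCESS) {
    		vm_map_unlock(map);
    		return (rv);
    	}
    	for (; entry->start < end; prev_entry = entry,
    	    entry = vm_map_entry_succ(entry)) {
    		rv = vm_map_clip_end(map, entry, end);
    		if (rv != KERN_SUCCESS)
    			break;
    		/* ... update this entry's protection/behavior here ... */
    		vm_map_try_merge_entries(map, prev_entry, entry);
    	}
    	vm_map_try_merge_entries(map, prev_entry, entry);
    	vm_map_unlock(map);
    	return (rv);
    }
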
2621 vm_map_entry_t entry;
2632 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2633 (entry->eflags & MAP_ENTRY_COW) == 0 &&
2634 entry->object.vm_object == NULL) {
2635 result = vm_map_clip_start(map, entry, start);
2638 result = vm_map_clip_end(map, entry, end);
2641 entry->object.sub_map = submap;
2642 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2761 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot,
2766 MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
2767 if ((entry->eflags & MAP_ENTRY_STACK_GAP) == 0)
2770 old_prot = PROT_EXTRACT(entry->offset);
2772 entry->offset = PROT_MAX(new_maxprot) |
2776 entry->offset = new_prot | PROT_MAX(
2777 PROT_MAX_EXTRACT(entry->offset));
2791 vm_map_entry_t entry, first_entry, in_tran, prev_entry;
2823 * update the protection on the map entry in between faults.
2856 for (entry = first_entry; entry->start < end;
2857 entry = vm_map_entry_succ(entry)) {
2858 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2862 if ((entry->eflags & (MAP_ENTRY_GUARD |
2865 max_prot = (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 ?
2866 PROT_MAX_EXTRACT(entry->offset) : entry->max_protection;
2871 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2872 in_tran = entry;
2877 * stabilized. An in-transition entry might already have its pages
2900 for (entry = first_entry; entry->start < end;
2901 entry = vm_map_entry_succ(entry)) {
2902 rv = vm_map_clip_end(map, entry, end);
2909 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2910 ENTRY_CHARGED(entry) ||
2911 (entry->eflags & MAP_ENTRY_GUARD) != 0)
2915 obj = entry->object.vm_object;
2918 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2919 if (!swap_reserve(entry->end - entry->start)) {
2921 end = entry->end;
2925 entry->cred = cred;
2941 ("vm_map_protect: object %p overcharged (entry %p)",
2942 obj, entry));
2946 end = entry->end;
2961 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
2962 entry->start < end;
2963 vm_map_try_merge_entries(map, prev_entry, entry),
2964 prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2968 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
2969 vm_map_protect_guard(entry, new_prot, new_maxprot,
2974 old_prot = entry->protection;
2977 entry->max_protection = new_maxprot;
2978 entry->protection = new_maxprot & old_prot;
2981 entry->protection = new_prot;
2989 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2990 (entry->protection & VM_PROT_WRITE) != 0 &&
2992 vm_fault_copy_entry(map, map, entry, entry, NULL);
2998 if ((old_prot & ~entry->protection) != 0) {
2999 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
3001 pmap_protect(map->pmap, entry->start,
3002 entry->end,
3003 entry->protection & MASK(entry));
3007 vm_map_try_merge_entries(map, prev_entry, entry);
3027 vm_map_entry_t entry, prev_entry;
3063 * Locate starting entry and clip if necessary.
3074 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
3080 for (; entry->start < end; prev_entry = entry,
3081 entry = vm_map_entry_succ(entry)) {
3082 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
3085 rv = vm_map_clip_end(map, entry, end);
3093 vm_map_entry_set_behavior(entry,
3097 vm_map_entry_set_behavior(entry,
3101 vm_map_entry_set_behavior(entry,
3105 entry->eflags |= MAP_ENTRY_NOSYNC;
3108 entry->eflags &= ~MAP_ENTRY_NOSYNC;
3111 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
3114 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
3119 vm_map_try_merge_entries(map, prev_entry, entry);
3121 vm_map_try_merge_entries(map, prev_entry, entry);
3133 if (!vm_map_lookup_entry(map, start, &entry))
3134 entry = vm_map_entry_succ(entry);
3135 for (; entry->start < end;
3136 entry = vm_map_entry_succ(entry)) {
3139 if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP |
3147 * entry's object nor the presence of a
3151 entry->object.vm_object != NULL &&
3152 entry->object.vm_object->backing_object != NULL)
3155 pstart = OFF_TO_IDX(entry->offset);
3156 pend = pstart + atop(entry->end - entry->start);
3157 useStart = entry->start;
3158 useEnd = entry->end;
3160 if (entry->start < start) {
3161 pstart += atop(start - entry->start);
3164 if (entry->end > end) {
3165 pend -= atop(entry->end - end);
3186 vm_object_madvise(entry->object.vm_object, pstart,
3195 entry->wired_count == 0) {
3198 entry->protection,
3199 entry->object.vm_object,
3223 vm_map_entry_t entry, lentry, prev_entry, start_entry;
3248 for (entry = start_entry; entry->start < end;
3249 prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3250 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3257 for (entry = start_entry; entry->start < end; prev_entry = entry,
3258 entry = vm_map_entry_succ(entry)) {
3259 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3260 entry, (uintmax_t)entry->end, (uintmax_t)end));
3261 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3263 entry->inheritance = new_inheritance;
3264 vm_map_try_merge_entries(map, prev_entry, entry);
3266 vm_map_try_merge_entries(map, prev_entry, entry);
3275 * Release the map lock, and sleep until the entry is no longer in
3277 * another held the lock, lookup a possibly-changed entry at or after the
3278 * 'start' position of the old entry.
3284 vm_map_entry_t entry;
3290 ("not in-tranition map entry %p", in_entry));
3292 * We have not yet clipped the entry.
3307 * Look again for the entry because the map was modified while it was
3308 * unlocked. Specifically, the entry may have been clipped, merged, or
3311 if (!vm_map_lookup_entry(map, start, &entry)) {
3316 entry = vm_map_entry_succ(entry);
3318 return (entry);
3330 vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3349 for (entry = first_entry; entry->start < end; entry = next_entry) {
3350 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3352 * We have not yet clipped the entry.
3355 &end, holes_ok, entry);
3357 if (entry == first_entry) {
3364 first_entry = (entry == first_entry) ?
3368 rv = vm_map_clip_start(map, entry, start);
3371 rv = vm_map_clip_end(map, entry, end);
3376 * Mark the entry in case the map lock is released. (See
3379 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3380 entry->wiring_thread == NULL,
3381 ("owned map entry %p", entry));
3382 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3383 entry->wiring_thread = curthread;
3384 next_entry = vm_map_entry_succ(entry);
3390 entry->end < end && next_entry->start > entry->end) {
3391 end = entry->end;
3396 * If system unwiring, require that the entry is system wired.
3399 vm_map_entry_system_wired_count(entry) == 0) {
3400 end = entry->end;
3410 entry = vm_map_entry_succ(first_entry);
3413 entry = first_entry;
3415 for (; entry->start < end;
3416 prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3423 * entry. Detect these cases and skip any entries
3426 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3427 entry->wiring_thread != curthread) {
3429 ("vm_map_unwire: !HOLESOK and new/changed entry"));
3434 (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3435 if (entry->wired_count == 1)
3436 vm_map_entry_unwire(map, entry);
3438 entry->wired_count--;
3440 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3442 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3443 ("vm_map_unwire: in-transition flag missing %p", entry));
3444 KASSERT(entry->wiring_thread == curthread,
3445 ("vm_map_unwire: alien wire %p", entry));
3446 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3447 entry->wiring_thread = NULL;
3448 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3449 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3452 vm_map_try_merge_entries(map, prev_entry, entry);
3454 vm_map_try_merge_entries(map, prev_entry, entry);
3486 * Handle a wiring failure on the given entry.
3491 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3496 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3497 entry->wired_count == 1,
3498 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3499 KASSERT(failed_addr < entry->end,
3500 ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3503 * If any pages at the start of this entry were successfully wired,
3506 if (failed_addr > entry->start) {
3507 pmap_unwire(map->pmap, entry->start, failed_addr);
3508 vm_object_unwire(entry->object.vm_object, entry->offset,
3509 failed_addr - entry->start, PQ_ACTIVE);
3514 * entry.
3516 entry->wired_count = -1;
3539 vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3563 for (entry = first_entry; entry->start < end; entry = next_entry) {
3564 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3566 * We have not yet clipped the entry.
3569 &end, holes_ok, entry);
3571 if (entry == first_entry)
3576 first_entry = (entry == first_entry) ?
3580 rv = vm_map_clip_start(map, entry, start);
3583 rv = vm_map_clip_end(map, entry, end);
3588 * Mark the entry in case the map lock is released. (See
3591 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3592 entry->wiring_thread == NULL,
3593 ("owned map entry %p", entry));
3594 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3595 entry->wiring_thread = curthread;
3596 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3597 || (entry->protection & prot) != prot) {
3598 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3600 end = entry->end;
3604 } else if (entry->wired_count == 0) {
3605 entry->wired_count++;
3607 npages = atop(entry->end - entry->start);
3609 vm_map_wire_entry_failure(map, entry,
3610 entry->start);
3611 end = entry->end;
3620 saved_start = entry->start;
3621 saved_end = entry->end;
3623 bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
3643 * Look again for the entry because the map was
3644 * modified while it was unlocked. The entry
3652 first_entry = (entry == first_entry) ?
3654 for (entry = next_entry; entry->end < saved_end;
3655 entry = vm_map_entry_succ(entry)) {
3663 faddr < entry->end)
3665 entry, faddr);
3669 vm_map_wire_entry_failure(map, entry, faddr);
3672 end = entry->end;
3676 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3677 entry->wired_count++;
3683 next_entry = vm_map_entry_succ(entry);
3685 entry->end < end && next_entry->start > entry->end) {
3686 end = entry->end;
3698 entry = vm_map_entry_succ(first_entry);
3701 entry = first_entry;
3703 for (; entry->start < end;
3704 prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3711 * wiring this new mapping entry. Detect these cases
3714 * Another way to get an entry not marked with
3718 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3719 entry->wiring_thread != curthread) {
3721 ("vm_map_wire: !HOLESOK and new/changed entry"));
3725 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3729 entry->eflags |= MAP_ENTRY_USER_WIRED;
3730 } else if (entry->wired_count == -1) {
3732 * Wiring failed on this entry. Thus, unwiring is
3735 entry->wired_count = 0;
3737 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3739 * Undo the wiring. Wiring succeeded on this entry
3740 * but failed on a later entry.
3742 if (entry->wired_count == 1) {
3743 vm_map_entry_unwire(map, entry);
3746 atop(entry->end - entry->start));
3748 entry->wired_count--;
3750 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3751 ("vm_map_wire: in-transition flag missing %p", entry));
3752 KASSERT(entry->wiring_thread == curthread,
3753 ("vm_map_wire: alien wire %p", entry));
3754 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3756 entry->wiring_thread = NULL;
3757 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3758 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3761 vm_map_try_merge_entries(map, prev_entry, entry);
3763 vm_map_try_merge_entries(map, prev_entry, entry);
3793 vm_map_entry_t entry, first_entry, next_entry;
3815 for (entry = first_entry; entry->start < end; entry = next_entry) {
3817 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3821 bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
3829 next_entry = vm_map_entry_succ(entry);
3830 if (end > entry->end &&
3831 entry->end != next_entry->start) {
3845 for (entry = first_entry; entry->start < end;) {
3846 offset = entry->offset + (start - entry->start);
3847 size = (end <= entry->end ? end : entry->end) - start;
3848 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3853 smap = entry->object.sub_map;
3863 object = entry->object.vm_object;
3874 !vm_map_lookup_entry(map, start, &entry))
3875 entry = vm_map_entry_succ(entry);
3885 * Make the region specified by this entry pageable.
3891 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3896 KASSERT(entry->wired_count > 0,
3897 ("vm_map_entry_unwire: entry %p isn't wired", entry));
3899 size = entry->end - entry->start;
3900 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3902 pmap_unwire(map->pmap, entry->start, entry->end);
3903 vm_object_unwire(entry->object.vm_object, entry->offset, size,
3905 entry->wired_count = 0;
3909 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3912 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3913 vm_object_deallocate(entry->object.vm_object);
3914 uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3920 * Deallocate the given entry from the target map.
3923 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3929 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3930 object = entry->object.vm_object;
3932 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3933 MPASS(entry->cred == NULL);
3934 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3936 vm_map_entry_deallocate(entry, vm_map_is_system(map));
3940 size = entry->end - entry->start;
3943 if (entry->cred != NULL) {
3944 swap_release_by_cred(size, entry->cred);
3945 crfree(entry->cred);
3948 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3949 entry->object.vm_object = NULL;
3952 KASSERT(entry->cred == NULL || object->cred == NULL ||
3953 (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3954 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3955 offidxstart = OFF_TO_IDX(entry->offset);
3988 vm_map_entry_deallocate(entry, TRUE);
3990 entry->defer_next = curthread->td_map_def_user;
3991 curthread->td_map_def_user = entry;
4004 vm_map_entry_t entry, next_entry, scratch_entry;
4016 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
4019 for (; entry->start < end; entry = next_entry) {
4021 * Wait for wiring or unwiring of an entry to complete.
4025 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
4027 vm_map_entry_system_wired_count(entry) != 0)) {
4031 saved_start = entry->start;
4032 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4038 * Look again for the entry because the map was
4040 * Specifically, the entry may have been
4048 next_entry = entry;
4053 rv = vm_map_clip_end(map, entry, end);
4056 next_entry = vm_map_entry_succ(entry);
4062 if (entry->wired_count != 0)
4063 vm_map_entry_unwire(map, entry);
4070 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
4071 entry->object.vm_object != NULL)
4072 pmap_map_delete(map->pmap, entry->start, entry->end);
4075 * Delete the entry only after removing all pmap
4080 vm_map_entry_delete(map, entry);
4121 vm_map_entry_t entry;
4126 entry = tmp_entry;
4132 if (start < entry->start)
4135 * Check protection associated with entry.
4137 if ((entry->protection & protection) != protection)
4139 /* go to next entry */
4140 start = entry->end;
4141 entry = vm_map_entry_succ(entry);
4150 * Copies a swap-backed object from an existing map entry to a
4201 * Copies the contents of the source entry to the destination
4202 * entry. The entries *must* be aligned properly.
4224 * If the source entry is marked needs_copy, it is already
4258 * src_entry, since the entry is
4260 * fake entry that is used to
4303 * Update the newly-forked vmspace each time a map entry is inherited
4309 vm_map_entry_t entry)
4314 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4316 entrysize = entry->end - entry->start;
4318 if ((entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
4320 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4321 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4322 newend = MIN(entry->end,
4324 vm2->vm_dsize += btoc(newend - entry->start);
4325 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4326 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4327 newend = MIN(entry->end,
4329 vm2->vm_tsize += btoc(newend - entry->start);
4401 * Clone the entry, creating the shared object if
4465 * Clone the entry, referencing the shared object.
4480 * Insert the entry into the new map -- we know we're
4497 * Clone the entry and link into the map.
4502 * Copied entry is COW over the old object.
4519 * Create a new anonymous mapping entry modelled from
4643 ("Bad entry start/end for new stack entry"));
4645 ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4653 ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
4655 ("entry %p not stack gap %#x", gap_entry,
4661 * next_read of the gap entry to store
4676 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we
4985 vm_map_entry_t entry;
5006 entry = *out_entry;
5011 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5014 *var_map = map = entry->object.sub_map;
5022 prot = entry->protection;
5026 (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
5027 (entry->eflags & MAP_ENTRY_STACK_GAP) != 0 &&
5028 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
5036 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
5039 ("entry %p flags %x", entry, entry->eflags));
5041 (entry->max_protection & VM_PROT_WRITE) == 0 &&
5042 (entry->eflags & MAP_ENTRY_COW) == 0) {
5051 *wired = (entry->wired_count != 0);
5053 fault_type = entry->protection;
5054 size = entry->end - entry->start;
5057 * If the entry was copy-on-write, we either ...
5059 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5078 if (entry->cred == NULL) {
5090 entry->cred = cred;
5092 eobject = entry->object.vm_object;
5093 vm_object_shadow(&entry->object.vm_object,
5094 &entry->offset, size, entry->cred, false);
5095 if (eobject == entry->object.vm_object) {
5099 swap_release_by_cred(size, entry->cred);
5100 crfree(entry->cred);
5102 entry->cred = NULL;
5103 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
5118 if (entry->object.vm_object == NULL && !vm_map_is_system(map)) {
5121 entry->object.vm_object = vm_object_allocate_anon(atop(size),
5122 NULL, entry->cred, size);
5123 entry->offset = 0;
5124 entry->cred = NULL;
5129 * Return the object/offset from this entry. If the entry was
5132 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5133 *object = entry->object.vm_object;
5155 vm_map_entry_t entry;
5166 entry = *out_entry;
5169 * Fail if the entry refers to a submap.
5171 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5177 prot = entry->protection;
5186 *wired = (entry->wired_count != 0);
5188 fault_type = entry->protection;
5190 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5192 * Fail if the entry was copy-on-write for a write fault.
5206 if (entry->object.vm_object == NULL && !vm_map_is_system(map))
5210 * Return the object/offset from this entry. If the entry was
5213 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5214 *object = entry->object.vm_object;
5227 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5267 vm_map_entry_t entry, prev;
5278 VM_MAP_ENTRY_FOREACH(entry, map) {
5279 KASSERT(prev->end <= entry->start,
5281 (uintmax_t)prev->end, (uintmax_t)entry->start));
5282 KASSERT(entry->start < entry->end,
5284 (uintmax_t)entry->start, (uintmax_t)entry->end));
5285 KASSERT(entry->left == header ||
5286 entry->left->start < entry->start,
5288 (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5289 KASSERT(entry->right == header ||
5290 entry->start < entry->right->start,
5292 (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5296 if (entry->start < cur->start) {
5301 map, (uintmax_t)entry->start));
5302 } else if (cur->end <= entry->start) {
5307 map, (uintmax_t)entry->start));
5309 KASSERT(cur == entry,
5311 map, (uintmax_t)entry->start));
5315 max_left = vm_map_entry_max_free_left(entry, lbound);
5316 max_right = vm_map_entry_max_free_right(entry, ubound);
5317 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5319 (uintmax_t)entry->max_free,
5321 prev = entry;
5323 KASSERT(prev->end <= entry->start,
5325 (uintmax_t)prev->end, (uintmax_t)entry->start));
5338 vm_map_entry_t entry, prev;
5346 VM_MAP_ENTRY_FOREACH(entry, map) {
5347 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
5348 (void *)entry, (void *)entry->start, (void *)entry->end,
5349 entry->eflags);
5355 entry->protection,
5356 entry->max_protection,
5358 entry->inheritance]);
5359 if (entry->wired_count != 0)
5362 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5364 (void *)entry->object.sub_map,
5365 (uintmax_t)entry->offset);
5368 entry->object.sub_map) {
5370 vm_map_print((vm_map_t)entry->object.sub_map);
5374 if (entry->cred != NULL)
5375 db_printf(", ruid %d", entry->cred->cr_ruid);
5377 (void *)entry->object.vm_object,
5378 (uintmax_t)entry->offset);
5379 if (entry->object.vm_object && entry->object.vm_object->cred)
5381 entry->object.vm_object->cred->cr_ruid,
5382 (uintmax_t)entry->object.vm_object->charge);
5383 if (entry->eflags & MAP_ENTRY_COW)
5385 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5390 entry->object.vm_object) {
5393 entry->object.vm_object,
5398 prev = entry;