Lines Matching defs:vma

79 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
80 void vma_set_page_prot(struct vm_area_struct *vma)
82 unsigned long vm_flags = vma->vm_flags;
85 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
86 if (vma_wants_writenotify(vma, vm_page_prot)) {
90 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
91 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
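
A minimal sketch of how this helper is normally used, assuming only what the listing shows at lines 1430-1431 and 1764-1765 further down (vm_flags_init() plus vm_get_page_prot()); demo_init_prot() is an illustrative name, not a kernel function:

#include <linux/mm.h>

static void demo_init_prot(struct vm_area_struct *vma, unsigned long vm_flags)
{
        vm_flags_init(vma, vm_flags);
        vma->vm_page_prot = vm_get_page_prot(vm_flags);

        /* For shared, writable mappings that want write-notify,
         * vma_set_page_prot() strips write permission from the PTE
         * protection so the first write traps into the fault handler. */
        vma_set_page_prot(vma);
}
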
169 goto out; /* mapping intersects with an existing non-brk vma. */
756 struct vm_area_struct *vma, *prev;
768 vma = find_vma_prev(mm, addr, &prev);
770 (!vma || addr + len <= vm_start_gap(vma)) &&
802 struct vm_area_struct *vma, *prev;
817 vma = find_vma_prev(mm, addr, &prev);
819 (!vma || addr + len <= vm_start_gap(vma)) &&
972 * find_vma_prev() - Find the VMA for a given address, or the next vma and set %pprev to the previous VMA, if any.
981 * Returns: The VMA associated with @addr, or the next vma.
982 * May return %NULL if there is no vma at or above @addr.
988 struct vm_area_struct *vma;
991 vma = vma_iter_load(&vmi);
993 if (!vma)
994 vma = vma_next(&vmi);
995 return vma;
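
A hedged usage sketch of the lookup semantics documented above; demo_addr_is_mapped() is illustrative and assumes the caller is allowed to take the mmap read lock:

#include <linux/mm.h>

static bool demo_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma, *prev;
        bool mapped;

        mmap_read_lock(mm);
        vma = find_vma_prev(mm, addr, &prev);
        /* @vma covers addr only if it starts at or below it; otherwise it
         * is merely the next vma above addr (or NULL), and @prev, if any,
         * is the vma immediately before it. */
        mapped = vma && vma->vm_start <= addr;
        mmap_read_unlock(mm);
        return mapped;
}
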
1003 static int acct_stack_growth(struct vm_area_struct *vma,
1006 struct mm_struct *mm = vma->vm_mm;
1010 if (!may_expand_vm(mm, vma->vm_flags, grow))
1018 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1022 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1023 vma->vm_end - size;
1024 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1040 * vma is the last one with address > vma->vm_end. Have to extend vma.
1042 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1044 struct mm_struct *mm = vma->vm_mm;
1048 VMA_ITERATOR(vmi, mm, vma->vm_start);
1050 if (!(vma->vm_flags & VM_GROWSUP))
1066 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1076 vma_iter_config(&vmi, vma->vm_start, address);
1077 if (vma_iter_prealloc(&vmi, vma))
1081 if (unlikely(anon_vma_prepare(vma))) {
1087 vma_start_write(vma);
1089 * vma->vm_start/vm_end cannot change under us because the caller
1093 anon_vma_lock_write(vma->anon_vma);
1096 if (address > vma->vm_end) {
1099 size = address - vma->vm_start;
1100 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1103 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1104 error = acct_stack_growth(vma, size, grow);
1108 * we need to protect against concurrent vma
1112 * anon vma. So, we reuse mm->page_table_lock
1113 * to guard against concurrent vma expansions.
1116 if (vma->vm_flags & VM_LOCKED)
1118 vm_stat_account(mm, vma->vm_flags, grow);
1119 anon_vma_interval_tree_pre_update_vma(vma);
1120 vma->vm_end = address;
1122 vma_iter_store(&vmi, vma);
1123 anon_vma_interval_tree_post_update_vma(vma);
1126 perf_event_mmap(vma);
1130 anon_vma_unlock_write(vma->anon_vma);
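
A worked instance of the arithmetic at lines 1099-1100 above, with illustrative addresses and PAGE_SHIFT == 12:

/*
 * A VM_GROWSUP stack vma [0x10000000, 0x10004000) asked to grow up to
 * address 0x10006000 gives
 *     size = 0x10006000 - 0x10000000 = 0x6000    (new total span)
 *     grow = (0x10006000 - 0x10004000) >> 12 = 2 (pages newly charged)
 * so acct_stack_growth() is asked to account only the two added pages,
 * and the check at line 1103 merely guards against vm_pgoff + size
 * wrapping around.
 */
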
1138 * vma is the first one with address < vma->vm_start. Have to extend vma.
1141 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1143 struct mm_struct *mm = vma->vm_mm;
1146 VMA_ITERATOR(vmi, mm, vma->vm_start);
1148 if (!(vma->vm_flags & VM_GROWSDOWN))
1166 vma_iter_next_range_limit(&vmi, vma->vm_start);
1168 vma_iter_config(&vmi, address, vma->vm_end);
1169 if (vma_iter_prealloc(&vmi, vma))
1173 if (unlikely(anon_vma_prepare(vma))) {
1179 vma_start_write(vma);
1181 * vma->vm_start/vm_end cannot change under us because the caller
1185 anon_vma_lock_write(vma->anon_vma);
1188 if (address < vma->vm_start) {
1191 size = vma->vm_end - address;
1192 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1195 if (grow <= vma->vm_pgoff) {
1196 error = acct_stack_growth(vma, size, grow);
1200 * we need to protect against concurrent vma
1204 * anon vma. So, we reuse mm->page_table_lock
1205 * to guard against concurrent vma expansions.
1208 if (vma->vm_flags & VM_LOCKED)
1210 vm_stat_account(mm, vma->vm_flags, grow);
1211 anon_vma_interval_tree_pre_update_vma(vma);
1212 vma->vm_start = address;
1213 vma->vm_pgoff -= grow;
1215 vma_iter_store(&vmi, vma);
1216 anon_vma_interval_tree_post_update_vma(vma);
1219 perf_event_mmap(vma);
1223 anon_vma_unlock_write(vma->anon_vma);
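
The downward case mirrors this, with one extra twist visible at line 1213: vm_pgoff must move back by the same number of pages. Illustrative values, PAGE_SHIFT == 12:

/*
 * A VM_GROWSDOWN stack vma [0x7ffff000, 0x80000000) faulting at
 * address 0x7fffd000 gives
 *     size = 0x80000000 - 0x7fffd000 = 0x3000   (new total span)
 *     grow = (0x7ffff000 - 0x7fffd000) >> 12 = 2
 * vm_start drops to 0x7fffd000 and vm_pgoff is reduced by 2, so the page
 * offset of every already-mapped page stays unchanged; the check at
 * line 1195 rejects the expansion if vm_pgoff would underflow.
 */
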
1246 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1248 return expand_upwards(vma, address);
1253 struct vm_area_struct *vma, *prev;
1256 vma = find_vma_prev(mm, addr, &prev);
1257 if (vma && (vma->vm_start <= addr))
1258 return vma;
1268 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1270 return expand_downwards(vma, address);
1275 struct vm_area_struct *vma;
1279 vma = find_vma(mm, addr);
1280 if (!vma)
1282 if (vma->vm_start <= addr)
1283 return vma;
1284 start = vma->vm_start;
1285 if (expand_stack_locked(vma, addr))
1287 if (vma->vm_flags & VM_LOCKED)
1288 populate_vma_page_range(vma, addr, start, NULL);
1289 return vma;
1295 #define vma_expand_up(vma, addr) expand_upwards(vma, addr)
1296 #define vma_expand_down(vma, addr) (-EFAULT)
1300 #define vma_expand_up(vma, addr) (-EFAULT)
1301 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1310 * the lock for writing, tries to look up a vma again, expands it if
1313 * If no vma is found or it can't be expanded, it returns NULL and has
1318 struct vm_area_struct *vma, *prev;
1324 vma = find_vma_prev(mm, addr, &prev);
1325 if (vma && vma->vm_start <= addr)
1329 vma = prev;
1333 if (vma && !vma_expand_down(vma, addr))
1341 return vma;
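
A hedged caller sketch based only on the locking contract described in the comment fragments above (enter with the mmap read lock held; on failure the helper has already dropped the lock). The surrounding function is assumed to be expand_stack(), and demo_vma_or_stack() is an illustrative name:

#include <linux/mm.h>

static struct vm_area_struct *demo_vma_or_stack(struct mm_struct *mm,
                                                unsigned long addr)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, addr);
        if (vma)
                return vma;             /* read lock still held */

        /* Not directly mapped: perhaps a stack that should grow to addr.
         * The expansion path retakes the lock for writing internally and,
         * on failure, returns NULL with the lock already dropped. */
        return expand_stack(mm, addr);
}
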
1365 struct vm_area_struct *vma = NULL;
1380 vma = vma_find(&vmi, end);
1381 init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
1382 if (vma) {
1393 vma = NULL;
1416 vma = vma_merge_new_range(&vmg);
1417 if (vma)
1424 vma = vm_area_alloc(mm);
1425 if (!vma)
1429 vma_set_range(vma, addr, end, pgoff);
1430 vm_flags_init(vma, vm_flags);
1431 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1434 vma->vm_file = get_file(file);
1440 error = call_mmap(file, vma);
1444 if (vma_is_shared_maywrite(vma)) {
1457 if (WARN_ON(addr != vma->vm_start))
1463 * vma again as we may succeed this time.
1465 if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
1466 vmg.flags = vma->vm_flags;
1472 * ->mmap() can change vma->vm_file and fput
1473 * the original file. So fput the vma->vm_file
1478 fput(vma->vm_file);
1479 vm_area_free(vma);
1480 vma = merge;
1482 vm_flags = vma->vm_flags;
1488 vm_flags = vma->vm_flags;
1490 error = shmem_zero_setup(vma);
1494 vma_set_anonymous(vma);
1497 if (map_deny_write_exec(vma, vma->vm_flags)) {
1504 if (!arch_validate_flags(vma->vm_flags))
1508 if (vma_iter_prealloc(&vmi, vma))
1512 vma_start_write(vma);
1513 vma_iter_store(&vmi, vma);
1515 vma_link_file(vma);
1521 khugepaged_enter_vma(vma, vma->vm_flags);
1523 /* Once vma denies write, undo our temporary denial count */
1527 file = vma->vm_file;
1528 ksm_add_vma(vma);
1530 perf_event_mmap(vma);
1537 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1538 is_vm_hugetlb_page(vma) ||
1539 vma == get_gate_vma(current->mm))
1540 vm_flags_clear(vma, VM_LOCKED_MASK);
1546 uprobe_mmap(vma);
1549 * A new (or expanded) vma always gets soft-dirty status.
1551 * be able to distinguish the situation where a vma area was unmapped,
1555 vm_flags_set(vma, VM_SOFTDIRTY);
1557 vma_set_page_prot(vma);
1563 if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close)
1564 vma->vm_ops->close(vma);
1566 if (file || vma->vm_file) {
1568 fput(vma->vm_file);
1569 vma->vm_file = NULL;
1571 vma_iter_set(&vmi, vma->vm_end);
1573 unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
1578 vm_area_free(vma);
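
For context on the call_mmap() step at line 1440 above, here is a hedged sketch of the kind of ->mmap() hook it invokes; demo_mmap(), demo_fops and demo_buf_pfn are illustrative, not kernel symbols. mmap_region() re-reads vma->vm_flags afterwards (line 1465) and fputs a replaced vma->vm_file (lines 1472-1478) precisely because hooks like this may change both:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

static unsigned long demo_buf_pfn;      /* pfn of a device buffer page */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > PAGE_SIZE)
                return -EINVAL;

        /* Flag changes made here are what the vm_flags recheck above
         * is defending against. */
        vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

        return remap_pfn_range(vma, vma->vm_start, demo_buf_pfn, size,
                               vma->vm_page_prot);
}

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .mmap   = demo_mmap,
};
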
1629 struct vm_area_struct *vma;
1652 vma = vma_lookup(mm, start);
1654 if (!vma || !(vma->vm_flags & VM_SHARED))
1657 if (start + size > vma->vm_end) {
1658 VMA_ITERATOR(vmi, mm, vma->vm_end);
1659 struct vm_area_struct *next, *prev = vma;
1666 if (next->vm_file != vma->vm_file)
1669 if (next->vm_flags != vma->vm_flags)
1682 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1683 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1684 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1688 if (vma->vm_flags & VM_LOCKED)
1691 file = get_file(vma->vm_file);
1692 ret = security_mmap_file(vma->vm_file, prot, flags);
1695 ret = do_mmap(vma->vm_file, start, size,
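
The fragment above (lines 1652-1695) appears to be the compatibility path behind remap_file_pages(2), which rebuilds the protection bits from the existing shared vma and re-calls do_mmap() on its file. Assuming that reading, a minimal userspace view, illustrative only:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        int fd = open("/tmp/demo.dat", O_RDWR); /* needs >= 2 pages of data */
        char *map;

        if (fd < 0)
                return 1;
        map = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        /* Make the first mapped page show file page 1 instead of page 0,
         * without creating a second mapping by hand. */
        if (remap_file_pages(map, pg, 0, 1, 0))
                return 1;
        return 0;
}
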
1709 * do_brk_flags() - Increase the brk vma if the flags match.
1710 * @vmi: The vma iterator
1713 * @vma: The vma to try to expand (may be NULL)
1720 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1740 * Expand the existing vma if possible; note that singular lists do not
1743 if (vma && vma->vm_end == addr) {
1746 vmg.prev = vma;
1755 if (vma)
1757 /* create a vma struct for an anonymous mapping */
1758 vma = vm_area_alloc(mm);
1759 if (!vma)
1762 vma_set_anonymous(vma);
1763 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1764 vm_flags_init(vma, flags);
1765 vma->vm_page_prot = vm_get_page_prot(flags);
1766 vma_start_write(vma);
1767 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1772 ksm_add_vma(vma);
1774 perf_event_mmap(vma);
1779 vm_flags_set(vma, VM_SOFTDIRTY);
1783 vm_area_free(vma);
1792 struct vm_area_struct *vma = NULL;
1820 vma = vma_prev(&vmi);
1821 ret = do_brk_flags(&vmi, vma, addr, len, flags);
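
A userspace view of the brk path above, illustrative only: growing the program break normally just extends the existing heap vma through do_brk_flags() rather than creating a new mapping.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        void *old = sbrk(0);            /* current program break */

        if (sbrk(4096) == (void *)-1)   /* ask the kernel to grow the heap */
                return 1;
        printf("brk moved from %p to %p\n", old, sbrk(0));
        return 0;
}
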
1840 struct vm_area_struct *vma;
1851 vma = vma_next(&vmi);
1852 if (!vma || unlikely(xa_is_zero(vma))) {
1864 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1874 vma_iter_set(&vmi, vma->vm_end);
1875 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1884 vma_iter_set(&vmi, vma->vm_end);
1886 if (vma->vm_flags & VM_ACCOUNT)
1887 nr_accounted += vma_pages(vma);
1888 remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
1891 vma = vma_next(&vmi);
1892 } while (vma && likely(!xa_is_zero(vma)));
1907 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1909 unsigned long charged = vma_pages(vma);
1912 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
1915 if ((vma->vm_flags & VM_ACCOUNT) &&
1920 * The vm_pgoff of a purely anonymous vma should be irrelevant
1927 * vma, merges and splits can happen in a seamless way, just
1931 if (vma_is_anonymous(vma)) {
1932 BUG_ON(vma->anon_vma);
1933 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
1936 if (vma_link(mm, vma)) {
1937 if (vma->vm_flags & VM_ACCOUNT)
1989 * Close hook, called for unmap() and on the old vma for mremap().
1991 * Having a close hook prevents vma merging regardless of flags.
1993 static void special_mapping_close(struct vm_area_struct *vma)
1995 const struct vm_special_mapping *sm = vma->vm_private_data;
1998 sm->close(sm, vma);
2001 static const char *special_mapping_name(struct vm_area_struct *vma)
2003 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
2019 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
2024 * the size of vma should stay the same over the special mapping's
2042 struct vm_area_struct *vma = vmf->vma;
2045 struct vm_special_mapping *sm = vma->vm_private_data;
2048 return sm->fault(sm, vmf->vma, vmf);
2072 struct vm_area_struct *vma;
2074 vma = vm_area_alloc(mm);
2075 if (unlikely(vma == NULL))
2078 vma_set_range(vma, addr, addr + len, 0);
2079 vm_flags_init(vma, (vm_flags | mm->def_flags |
2081 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2083 vma->vm_ops = ops;
2084 vma->vm_private_data = priv;
2086 ret = insert_vm_struct(mm, vma);
2090 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
2092 perf_event_mmap(vma);
2094 return vma;
2097 vm_area_free(vma);
2101 bool vma_is_special_mapping(const struct vm_area_struct *vma,
2104 return vma->vm_private_data == sm &&
2105 vma->vm_ops == &special_mapping_vmops;
2110 * Insert a new vma covering the given region, with the given flags.
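
A hedged sketch of how these hooks are typically wired up by an installer such as the vDSO code: define a struct vm_special_mapping and hand it to _install_special_mapping(). demo_pages, demo_mapping and demo_install() are illustrative names; demo_pages[0] is assumed to be filled with a real page at init time.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static struct page *demo_pages[2];      /* [0] set at init, [1] NULL terminator */

static const struct vm_special_mapping demo_mapping = {
        .name   = "[demo]",
        .pages  = demo_pages,
};

static int demo_install(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        /* The region's size stays fixed for its lifetime: splitting is
         * refused by special_mapping_split() above, and VM_DONTEXPAND
         * prevents it from growing. */
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_MAYREAD,
                                       &demo_mapping);

        mmap_write_unlock(mm);
        return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
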
2247 * this VMA and its relocated range, which will now reside at [vma->vm_start -
2248 * shift, vma->vm_end - shift).
2253 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
2258 * 1) Use shift to calculate the new vma endpoints.
2259 * 2) Extend vma to cover both the old and new ranges. This ensures the
2261 * 3) Move vma's page tables to the new range.
2263 * 5) Shrink the vma to cover only the new range.
2266 struct mm_struct *mm = vma->vm_mm;
2267 unsigned long old_start = vma->vm_start;
2268 unsigned long old_end = vma->vm_end;
2273 VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
2283 if (vma != vma_next(&vmi))
2290 vmg.vma = vma;
2298 if (length != move_page_tables(vma, old_start,
2299 vma, new_start, length, false, true))
2324 /* Shrink the vma to just the new range */
2325 return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
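
A worked instance of the relocation described above, with illustrative addresses (the vma bounds and shift are made up):

/*
 * old vma:   [0x7ffffffde000, 0x7ffffffff000), length 0x21000
 * shift:      0x20000
 * new range:  [0x7ffffffbe000, 0x7ffffffdf000)
 *
 * Step 2 first extends the vma to [0x7ffffffbe000, 0x7ffffffff000) so it
 * covers both ranges at once; move_page_tables() (line 2298) then shifts
 * the full 0x21000 bytes of mappings down, and vma_shrink() (line 2325)
 * finally trims the vma to just the new range.
 */
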