Lines matching defs:vma (definitions of `vma`). The numbered fragments below appear to come from the Linux kernel's no-MMU memory manager, mm/nommu.c; each prefix is the line number in that source file.

99 struct vm_area_struct *vma;
101 vma = find_vma(current->mm, (unsigned long)objp);
102 if (vma)
103 return vma->vm_end - vma->vm_start;
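Lines 99-103 look like the VMA branch of kobjsize(): on a no-MMU kernel a pointer that is not slab-backed may sit inside a user mapping, in which case the object's size is the extent of the covering VMA. A hedged sketch of the surrounding logic (condensed from memory of mm/nommu.c; the PageCompound() guard is an assumption and exact code varies by version):

	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		/* the pointer may land inside a user mapping; if so,
		 * report the whole mapping's extent as the object size */
		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}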
154 struct vm_area_struct *vma;
157 vma = find_vma(current->mm, (unsigned long)ret);
158 if (vma)
159 vm_flags_set(vma, VM_USERMAP);
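Lines 154-159 match the vmalloc_user() path: after allocating, the VMA covering the fresh buffer is tagged VM_USERMAP so that remap_vmalloc_range() will accept it later. A sketch of the likely surroundings (the locking calls are filled in from memory, not shown in the listing):

	ret = __vmalloc(size, flags);
	if (ret) {
		struct vm_area_struct *vma;

		mmap_write_lock(current->mm);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			/* mark the buffer as mappable to userspace */
			vm_flags_set(vma, VM_USERMAP);
		mmap_write_unlock(current->mm);
	}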
335 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
342 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
349 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
356 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
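Lines 335-356 are the no-MMU stubs of the page-insertion helpers: with no page tables there is nothing to insert, so each one simply fails. Likely shape (vm_insert_pages, vm_map_pages and vm_map_pages_zero should follow the same pattern):

	int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			   struct page *page)
	{
		/* no page tables to point at the page on !MMU */
		return -EINVAL;
	}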
536 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
538 vma->vm_mm = mm;
541 if (vma->vm_file) {
542 struct address_space *mapping = vma->vm_file->f_mapping;
546 vma_interval_tree_insert(vma, &mapping->i_mmap);
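setup_vma_to_mm() (lines 536-546) binds a VMA to an mm and, for file-backed mappings, links it into the file's interval tree. The gap at lines 543-545 is presumably the i_mmap locking; a hedged reconstruction:

	static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
	{
		vma->vm_mm = mm;

		/* add the VMA to the mapping */
		if (vma->vm_file) {
			struct address_space *mapping = vma->vm_file->f_mapping;

			i_mmap_lock_write(mapping);
			flush_dcache_mmap_lock(mapping);
			vma_interval_tree_insert(vma, &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}
	}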
552 static void cleanup_vma_from_mm(struct vm_area_struct *vma)
554 vma->vm_mm->map_count--;
556 if (vma->vm_file) {
558 mapping = vma->vm_file->f_mapping;
562 vma_interval_tree_remove(vma, &mapping->i_mmap);
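cleanup_vma_from_mm() (lines 552-562) is the mirror image: drop the mm's mapping count and unlink a file-backed VMA from the interval tree, under the same locks (again filled in from memory):

	static void cleanup_vma_from_mm(struct vm_area_struct *vma)
	{
		vma->vm_mm->map_count--;

		/* remove the VMA from the mapping */
		if (vma->vm_file) {
			struct address_space *mapping = vma->vm_file->f_mapping;

			i_mmap_lock_write(mapping);
			flush_dcache_mmap_lock(mapping);
			vma_interval_tree_remove(vma, &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}
	}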
571 static int delete_vma_from_mm(struct vm_area_struct *vma)
573 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
575 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
576 if (vma_iter_prealloc(&vmi, vma)) {
577 pr_warn("Allocation of vma tree for process %d failed\n",
581 cleanup_vma_from_mm(vma);
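delete_vma_from_mm() (lines 571-581) preallocates maple-tree state first so that the erase itself cannot fail halfway, then unhooks the VMA. Hedged reconstruction of the whole function:

	static int delete_vma_from_mm(struct vm_area_struct *vma)
	{
		VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);

		vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
		if (vma_iter_prealloc(&vmi, vma)) {
			pr_warn("Allocation of vma tree for process %d failed\n",
				current->pid);
			return -ENOMEM;
		}
		cleanup_vma_from_mm(vma);

		/* remove from the MM's tree and list */
		vma_iter_clear(&vmi);
		return 0;
	}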
590 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
592 if (vma->vm_ops && vma->vm_ops->close)
593 vma->vm_ops->close(vma);
594 if (vma->vm_file)
595 fput(vma->vm_file);
596 put_nommu_region(vma->vm_region);
597 vm_area_free(vma);
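delete_vma() (lines 590-597) is the destruction path: run the driver's close hook, drop the file reference, release the shared nommu region, and free the VMA itself. Filling in only the blank lines, it should read roughly:

	static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
	{
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
		if (vma->vm_file)
			fput(vma->vm_file);
		put_nommu_region(vma->vm_region);
		vm_area_free(vma);
	}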
630 struct vm_area_struct *vma;
633 vma = vma_lookup(mm, addr);
634 if (!vma)
636 return vma;
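Lines 630-636 look like the no-MMU lock_mm_and_find_vma() (an educated guess from the vma_lookup() call): take the mmap read lock, look up the faulting address, and drop the lock again if nothing covers it, so the caller only ever holds the lock together with a valid VMA. Sketch:

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (!vma)
		mmap_read_unlock(mm);
	return vma;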
643 int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
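A no-MMU mapping is a fixed physical allocation, so a stack can never grow; expand_stack_locked() (line 643) almost certainly just fails:

	int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
	{
		/* fixed-size mappings: there is nothing to expand into */
		return -ENOMEM;
	}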
662 struct vm_area_struct *vma;
666 vma = vma_iter_load(&vmi);
667 if (!vma)
669 if (vma->vm_start != addr)
671 if (vma->vm_end != end)
674 return vma;
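find_vma_exact() (lines 662-674) accepts a VMA only when both endpoints match the request exactly, which is what mremap() needs here. Hedged reconstruction:

	static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
						     unsigned long addr,
						     unsigned long len)
	{
		struct vm_area_struct *vma;
		unsigned long end = addr + len;
		VMA_ITERATOR(vmi, mm, addr);

		vma = vma_iter_load(&vmi);
		if (!vma)
			return NULL;
		if (vma->vm_start != addr)
			return NULL;
		if (vma->vm_end != end)
			return NULL;

		return vma;
	}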
884 static int do_mmap_shared_file(struct vm_area_struct *vma)
888 ret = call_mmap(vma->vm_file, vma);
890 vma->vm_region->vm_top = vma->vm_region->vm_end;
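do_mmap_shared_file() (lines 884-890) asks the backing file or device to establish the shared mapping directly; on success vm_top is pushed to vm_end because the whole region is usable. A sketch including the error handling the listing omits (comments paraphrased):

	static int do_mmap_shared_file(struct vm_area_struct *vma)
	{
		int ret;

		ret = call_mmap(vma->vm_file, vma);
		if (ret == 0) {
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* -ENOSYS means direct mapping isn't possible at all, and a
		 * private copy can't honour MAP_SHARED, so give up */
		return -ENODEV;
	}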
905 static int do_mmap_private(struct vm_area_struct *vma,
921 ret = call_mmap(vma->vm_file, vma);
923 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
926 vma->vm_region->vm_top = vma->vm_region->vm_end;
956 vm_flags_set(vma, VM_MAPPED_COPY);
957 region->vm_flags = vma->vm_flags;
962 vma->vm_start = region->vm_start;
963 vma->vm_end = region->vm_start + len;
965 if (vma->vm_file) {
969 fpos = vma->vm_pgoff;
972 ret = kernel_read(vma->vm_file, base, len, &fpos);
981 vma_set_anonymous(vma);
988 region->vm_start = vma->vm_start = 0;
989 region->vm_end = vma->vm_end = 0;
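do_mmap_private() (lines 905-989) first tries a direct driver mapping and otherwise builds an anonymous copy: allocate a contiguous run of pages, tag the VMA VM_MAPPED_COPY, and read the file contents into the buffer. A heavily condensed sketch of that flow (not verbatim; the page allocation step is elided):

	/* try a direct mapping from the file/device first */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		/* shouldn't return success if we're not sharing */
		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
			ret = -ENOSYS;
		if (ret == 0) {
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;
		/* -ENOSYS: fall through and make a private copy instead */
	}

	/* ... allocate a contiguous 2^order page run for the copy ... */
	vm_flags_set(vma, VM_MAPPED_COPY);
	region->vm_flags = vma->vm_flags;
	vma->vm_start = region->vm_start;
	vma->vm_end = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of the file into the copy */
		loff_t fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;
		ret = kernel_read(vma->vm_file, base, len, &fpos);
		/* ... on short read, zero the tail; on error, reset the
		 * region and VMA bounds to 0 (lines 988-989) ... */
	} else {
		vma_set_anonymous(vma);
	}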
1013 struct vm_area_struct *vma;
1043 vma = vm_area_alloc(current->mm);
1044 if (!vma)
1051 vm_flags_init(vma, vm_flags);
1052 vma->vm_pgoff = pgoff;
1056 vma->vm_file = get_file(file);
1108 vma->vm_region = pregion;
1111 vma->vm_start = start;
1112 vma->vm_end = start + len;
1115 vm_flags_set(vma, VM_MAPPED_COPY);
1117 ret = do_mmap_shared_file(vma);
1119 vma->vm_region = NULL;
1120 vma->vm_start = 0;
1121 vma->vm_end = 0;
1155 vma->vm_start = region->vm_start = addr;
1156 vma->vm_end = region->vm_end = addr + len;
1161 vma->vm_region = region;
1166 if (file && vma->vm_flags & VM_SHARED)
1167 ret = do_mmap_shared_file(vma);
1169 ret = do_mmap_private(vma, region, len, capabilities);
1175 if (!vma->vm_file &&
1182 result = vma->vm_start;
1187 BUG_ON(!vma->vm_region);
1188 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1189 if (vma_iter_prealloc(&vmi, vma))
1192 setup_vma_to_mm(vma, current->mm);
1195 vma_iter_store(&vmi, vma);
1199 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1215 if (vma->vm_file)
1216 fput(vma->vm_file);
1217 vm_area_free(vma);
1228 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
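The do_mmap() fragments (lines 1013-1228) are the heart of no-MMU mmap(). Roughly: allocate a VMA and a vm_region; for shared file mappings, walk the global nommu region tree for an existing region covering the same pages of the same file and piggyback on it (lines 1108-1121); otherwise claim a fresh region and map it via do_mmap_shared_file() or do_mmap_private() (lines 1155-1169); finally insert the VMA into the mm and flush the icache for executable mappings (lines 1187-1199). A condensed sketch of the sharing branch only (hedged reconstruction):

	/* an existing region covers the same pages of the same file:
	 * share it instead of allocating more memory */
	pregion->vm_usage++;
	vma->vm_region = pregion;
	start = pregion->vm_start;
	start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
	vma->vm_start = start;
	vma->vm_end = start + len;

	if (pregion->vm_flags & VM_MAPPED_COPY)
		vm_flags_set(vma, VM_MAPPED_COPY);
	else {
		ret = do_mmap_shared_file(vma);
		if (ret < 0) {
			/* undo the piggyback and bail out */
			vma->vm_region = NULL;
			vma->vm_start = 0;
			vma->vm_end = 0;
			pregion->vm_usage--;
		}
	}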
1294 * split a vma into two pieces at address 'addr', a new vma is allocated either
1297 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1307 if (vma->vm_file)
1310 mm = vma->vm_mm;
1318 new = vm_area_dup(vma);
1323 *region = *vma->vm_region;
1326 npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1336 if (vma_iter_prealloc(vmi, vma)) {
1337 pr_warn("Allocation of vma tree for process %d failed\n",
1346 delete_nommu_region(vma->vm_region);
1348 vma->vm_region->vm_start = vma->vm_start = addr;
1349 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1351 vma->vm_region->vm_end = vma->vm_end = addr;
1352 vma->vm_region->vm_top = addr;
1354 add_nommu_region(vma->vm_region);
1358 setup_vma_to_mm(vma, mm);
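The no-MMU split_vma() (lines 1294-1358) refuses file-backed VMAs, whose region may be shared, then duplicates both the VMA and its backing region and trims old and new to meet at addr. The region tree is updated under nommu_region_sem, which is why the adjustment is bracketed by delete_nommu_region()/add_nommu_region(). Condensed sketch of the trim step:

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below)
		region->vm_top = region->vm_end = new->vm_end = addr;
	else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);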
1376 struct vm_area_struct *vma,
1383 if (from > vma->vm_start) {
1384 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1386 vma->vm_end = from;
1388 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1390 vma->vm_start = to;
1394 region = vma->vm_region;
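vmi_shrink_vma() (lines 1376-1394) handles a partial munmap() that releases one end of a VMA: clear the dropped range from the maple tree, move the matching endpoint, then cut the backing region down and free the excess pages. Hedged sketch, including the region trim that follows line 1394:

	if (from > vma->vm_start) {
		/* shrinking from the tail */
		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
			return -ENOMEM;
		vma->vm_end = from;
	} else {
		/* shrinking from the head */
		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
			return -ENOMEM;
		vma->vm_start = to;
	}

	/* cut the backing region down to size */
	region = vma->vm_region;
	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);
	free_page_series(from, to);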
1420 struct vm_area_struct *vma;
1431 vma = vma_find(&vmi, end);
1432 if (!vma) {
1444 if (vma->vm_file) {
1446 if (start > vma->vm_start)
1448 if (end == vma->vm_end)
1450 vma = vma_find(&vmi, end);
1451 } while (vma);
1455 if (start == vma->vm_start && end == vma->vm_end)
1457 if (start < vma->vm_start || end > vma->vm_end)
1461 if (end != vma->vm_end && offset_in_page(end))
1463 if (start != vma->vm_start && end != vma->vm_end) {
1464 ret = split_vma(&vmi, vma, start, 1);
1468 return vmi_shrink_vma(&vmi, vma, start, end);
1472 if (delete_vma_from_mm(vma))
1475 delete_vma(mm, vma);
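do_munmap() (lines 1420-1475) has two personalities. File-backed mappings may share their region with another process, so partial unmaps are refused: the request must cover whole VMAs exactly (lines 1444-1451). Anonymous mappings can instead be split and shrunk (lines 1455-1468), after which each victim VMA is unhooked and destroyed (lines 1472-1475). Condensed sketch of the anonymous path:

	/* the chunk must be a subset of the VMA found */
	if (start == vma->vm_start && end == vma->vm_end)
		goto erase_whole_vma;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;
	if (offset_in_page(start))
		return -EINVAL;
	if (end != vma->vm_end && offset_in_page(end))
		return -EINVAL;
	if (start != vma->vm_start && end != vma->vm_end) {
		/* punching a hole: split first, then shrink one half */
		ret = split_vma(&vmi, vma, start, 1);
		if (ret < 0)
			return ret;
	}
	return vmi_shrink_vma(&vmi, vma, start, end);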
1502 struct vm_area_struct *vma;
1514 for_each_vma(vmi, vma) {
1515 cleanup_vma_from_mm(vma);
1516 delete_vma(mm, vma);
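exit_mmap() (lines 1502-1516) tears down a dying address space by walking every VMA and destroying it; no per-VMA tree surgery is needed since the whole maple tree is destroyed afterwards. Hedged reconstruction:

	void exit_mmap(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);
		struct vm_area_struct *vma;

		if (!mm)
			return;

		mm->total_vm = 0;
		mmap_write_lock(mm);
		for_each_vma(vmi, vma) {
			cleanup_vma_from_mm(vma);
			delete_vma(mm, vma);
			cond_resched();
		}
		__mt_destroy(&mm->mm_mt);
		mmap_write_unlock(mm);
	}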
1537 struct vm_area_struct *vma;
1551 vma = find_vma_exact(current->mm, addr, old_len);
1552 if (!vma)
1555 if (vma->vm_end != vma->vm_start + old_len)
1558 if (is_nommu_shared_mapping(vma->vm_flags))
1561 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1565 vma->vm_end = vma->vm_start + new_len;
1566 return vma->vm_start;
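The no-MMU do_mremap() (lines 1537-1566) can neither move a mapping nor grow it past its backing region; all it supports is resizing in place within space the region already owns. Condensed sketch of the checks:

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	/* the mapping may be shared with another process: don't resize */
	if (is_nommu_shared_mapping(vma->vm_flags))
		return (unsigned long) -EPERM;

	/* the new size must fit inside the already-allocated region */
	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;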
1581 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1587 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1592 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1595 unsigned long vm_len = vma->vm_end - vma->vm_start;
1597 pfn += vma->vm_pgoff;
1598 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
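On !MMU, remap_pfn_range() (lines 1581-1587) has no page tables to fill: it can only verify that the requested mapping is the identity mapping and tag the VMA, and vm_iomap_memory() (lines 1592-1598) reduces to a single io_remap_pfn_range() call. Likely shape of the former (the identity check is an assumption):

	int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long pfn, unsigned long size, pgprot_t prot)
	{
		if (addr != (pfn << PAGE_SHIFT))
			return -EINVAL;

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		return 0;
	}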
1602 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1605 unsigned int size = vma->vm_end - vma->vm_start;
1607 if (!(vma->vm_flags & VM_USERMAP))
1610 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1611 vma->vm_end = vma->vm_start + size;
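remap_vmalloc_range() (lines 1602-1611) is the no-MMU trick for exposing kernel buffers to userspace: rather than building page tables it points the VMA straight at the vmalloc buffer, and only for buffers tagged VM_USERMAP at allocation time (see lines 154-159 above). With the return filled in, the whole function is roughly:

	int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff)
	{
		unsigned int size = vma->vm_end - vma->vm_start;

		if (!(vma->vm_flags & VM_USERMAP))
			return -EINVAL;

		/* retarget the VMA directly at the kernel buffer */
		vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
		vma->vm_end = vma->vm_start + size;
		return 0;
	}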
1635 struct vm_area_struct *vma;
1642 vma = find_vma(mm, addr);
1643 if (vma) {
1645 if (addr + len >= vma->vm_end)
1646 len = vma->vm_end - addr;
1649 if (write && vma->vm_flags & VM_MAYWRITE)
1650 copy_to_user_page(vma, NULL, addr,
1652 else if (!write && vma->vm_flags & VM_MAYREAD)
1653 copy_from_user_page(vma, NULL, addr,
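Lines 1635-1653 are from the no-MMU __access_remote_vm(): the access must start inside one of the target's mappings, is clamped to the end of that VMA, and is then done as a plain copy since kernel and user share one address space. Hedged sketch (the write flag being derived from gup_flags & FOLL_WRITE is an assumption from recent kernels):

	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					  (void *)addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *)addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}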
1719 struct vm_area_struct *vma;
1731 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1734 if (vma->vm_flags & VM_SHARED) {
1747 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1748 if (!(vma->vm_flags & VM_SHARED))
1751 region = vma->vm_region;
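The final block (lines 1719-1751) is nommu_shrink_inode_mappings(), run when a file with shared mappings is truncated. Pass one (line 1731): if any shared VMA overlaps the range being cut off, the shrink is refused. Pass two (line 1747): every remaining shared mapping has its backing region trimmed so the pages past the new EOF can be freed. Condensed sketch of the two passes (locking and the trim arithmetic are partly from memory):

	/* pass 1: a shared mapping over the dead zone blocks the shrink */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		if (vma->vm_flags & VM_SHARED)
			return -ETXTBSY; /* not quite true, but near enough */
	}

	/* pass 2: trim regions that extend past the new EOF */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		/* ... if the region extends past the new size, pull vm_top
		 * (and vm_end if needed) back to release the excess ... */
	}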