/linux/mm/
nommu.c
      96  return vma->vm_end - vma->vm_start;   in kobjsize()
     448  BUG_ON(last->vm_end <= last->vm_start);   in validate_nommu_regions()
     449  BUG_ON(last->vm_top < last->vm_end);   in validate_nommu_regions()
     455  BUG_ON(region->vm_end <= region->vm_start);   in validate_nommu_regions()
     456  BUG_ON(region->vm_top < region->vm_end);   in validate_nommu_regions()
     601  vma_iter_config(&vmi, vma->vm_start, vma->vm_end);   in delete_vma_from_mm()
     680  if (vma->vm_end != end)   in find_vma_exact()
     899  vma->vm_region->vm_top = vma->vm_region->vm_end;   in do_mmap_shared_file()
     935  vma->vm_region->vm_top = vma->vm_region->vm_end;   in do_mmap_private()
     968  region->vm_end = region->vm_start + len;   in do_mmap_private()
    [all …]
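The kobjsize() and validate_nommu_regions() hits above capture the two basic vm_end conventions: a mapping's size is the plain difference of the half-open [vm_start, vm_end) range, and a nommu region must satisfy vm_start < vm_end <= vm_top. A minimal userspace sketch of those invariants; struct region, region_size(), and validate() are simplified, illustrative stand-ins, not the kernel types:

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's vm_region; vm_top is the end of
 * the backing allocation, which may extend past the mapped vm_end. */
struct region {
	unsigned long vm_start; /* first byte of the mapping */
	unsigned long vm_end;   /* one past the last mapped byte */
	unsigned long vm_top;   /* one past the last allocated byte */
};

/* Size of the mapped range: vm_end is exclusive, so plain subtraction. */
static unsigned long region_size(const struct region *r)
{
	return r->vm_end - r->vm_start;
}

/* The invariants validate_nommu_regions() enforces with BUG_ON(). */
static void validate(const struct region *r)
{
	assert(r->vm_end > r->vm_start); /* non-empty, no wraparound */
	assert(r->vm_top >= r->vm_end);  /* allocation covers the mapping */
}

int main(void)
{
	struct region r = { .vm_start = 0x1000, .vm_end = 0x3000, .vm_top = 0x4000 };

	validate(&r);
	printf("mapped %lu bytes, %lu bytes of slack\n",
	       region_size(&r), r.vm_top - r.vm_end);
	return 0;
}
```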
vma.c
     283  uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);   in vma_prepare()
     287  vp->adj_next->vm_end);   in vma_prepare()
     375  vp->remove->vm_end);   in vma_complete()
     383  WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);   in vma_complete()
     417  return vmg->prev && vmg->prev->vm_end == vmg->start &&   in can_vma_merge_left()
     478  unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end);   in unmap_region()
     479  mas_set(mas, vma->vm_end);   in unmap_region()
     480  free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,   in unmap_region()
     500  WARN_ON(vma->vm_end <= addr);   in __split_vma()
     513  new->vm_end = addr;   in __split_vma()
    [all …]
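The __split_vma() hits (WARN_ON(vma->vm_end <= addr), new->vm_end = addr) show how a VMA is cut in two at an address. A toy sketch of the range arithmetic under half-open semantics; struct range and split_at() are illustrative names, not the kernel API:

```c
#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for vm_area_struct: just the half-open range. */
struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/*
 * Split [vm_start, vm_end) at addr into [vm_start, addr) and
 * [addr, vm_end), mirroring what __split_vma() does to the two VMAs.
 * new_below chooses which half the new range covers, as in the kernel.
 */
static struct range split_at(struct range *old, unsigned long addr, int new_below)
{
	struct range new = *old;

	assert(addr > old->vm_start && addr < old->vm_end);
	if (new_below) {
		new.vm_end = addr;   /* new covers the low half */
		old->vm_start = addr;
	} else {
		new.vm_start = addr; /* new covers the high half */
		old->vm_end = addr;
	}
	return new;
}

int main(void)
{
	struct range vma = { 0x1000, 0x5000 };
	struct range new = split_at(&vma, 0x3000, 1);

	printf("new: [%#lx, %#lx)  old: [%#lx, %#lx)\n",
	       new.vm_start, new.vm_end, vma.vm_start, vma.vm_end);
	return 0;
}
```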
vma_exec.c
      34  unsigned long old_end = vma->vm_end;   in relocate_vma_down()
     139  vma->vm_end = STACK_TOP_MAX;   in create_init_stack_vma()
     140  vma->vm_start = vma->vm_end - PAGE_SIZE;   in create_init_stack_vma()
     153  *top_mem_p = vma->vm_end - sizeof(void *);   in create_init_stack_vma()
msync.c
      90  fend = fstart + (min(end, vma->vm_end) - start) - 1;   in SYSCALL_DEFINE3()
      91  start = vma->vm_end;   in SYSCALL_DEFINE3()
     107  vma = find_vma(mm, vma->vm_end);   in SYSCALL_DEFINE3()
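The msync() hits illustrate the standard loop over every VMA intersecting [start, end): clamp the per-VMA chunk with min(end, vma->vm_end), then continue the lookup at vma->vm_end so unmapped gaps are skipped. A userspace sketch of that loop shape, with find_range() standing in for find_vma() and a sorted array standing in for the mm's VMA tree:

```c
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/* Toy stand-in for find_vma(): first range with vm_end > addr. */
static const struct range *find_range(const struct range *v, int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (v[i].vm_end > addr)
			return &v[i];
	return NULL;
}

int main(void)
{
	const struct range vmas[] = {
		{ 0x1000, 0x3000 }, { 0x5000, 0x8000 }, { 0x9000, 0xa000 },
	};
	unsigned long start = 0x2000, end = 0x9800;
	const struct range *vma = find_range(vmas, 3, start);

	/* The loop shape used by msync(): clamp each chunk to the VMA,
	 * then restart the lookup at vma->vm_end, skipping any gap. */
	while (vma && vma->vm_start < end) {
		unsigned long chunk_start = (start > vma->vm_start) ? start : vma->vm_start;
		unsigned long chunk_end = MIN(end, vma->vm_end);

		printf("process [%#lx, %#lx)\n", chunk_start, chunk_end);
		start = vma->vm_end;
		vma = find_range(vmas, 3, vma->vm_end);
	}
	return 0;
}
```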
mremap.c
    1053  if (!err && vma->vm_end != old_addr + old_len)   in prep_move_vma()
    1091  unsigned long vm_end;   in unmap_source_vma() (local)
    1123  vm_end = vma->vm_end;   in unmap_source_vma()
    1169  if (vm_end > end) {   in unmap_source_vma()
    1255  unsigned long old_end = vrm->vma->vm_end;   in dontunmap_complete()
    1421  unsigned long end = vma->vm_end + delta;   in vma_expandable()
    1423  if (end < vma->vm_end) /* overflow */   in vma_expandable()
    1425  if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))   in vma_expandable()
    1437  unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;   in vrm_can_expand_in_place()
    1460  VMA_ITERATOR(vmi, mm, vma->vm_end);   in expand_vma_in_place()
    [all …]
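The vma_expandable() hits (end = vma->vm_end + delta; if (end < vma->vm_end) /* overflow */) rely on unsigned wraparound to detect address-space overflow before checking for a colliding mapping. A compilable sketch; intersects() is an illustrative stand-in for find_vma_intersection():

```c
#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/*
 * Mirror of the check in vma_expandable(): growing a mapping in place
 * is only possible if vm_end + delta neither wraps the address space
 * nor runs into another mapping. The wraparound test relies on
 * unsigned arithmetic: if the sum overflowed, it is smaller than the
 * original vm_end.
 */
static bool expandable(const struct range *vma, unsigned long delta,
		       bool (*intersects)(unsigned long lo, unsigned long hi))
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end)	/* overflow */
		return false;
	if (intersects(vma->vm_end, end))
		return false;	/* something else is mapped there */
	return true;
}

static bool nothing_mapped(unsigned long lo, unsigned long hi)
{
	(void)lo; (void)hi;
	return false; /* pretend the space above is free */
}

int main(void)
{
	struct range vma = { 0x1000, 0x3000 };

	printf("grow by 0x1000: %d\n", expandable(&vma, 0x1000, nothing_mapped));
	printf("grow by -1ul:   %d\n", expandable(&vma, -1ul, nothing_mapped));
	return 0;
}
```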
mseal.c
      49  prev_end = vma->vm_end;   in range_contains_unmapped()
      69  const unsigned long curr_end = MIN(vma->vm_end, end);   in mseal_apply()
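range_contains_unmapped() tracks prev_end = vma->vm_end across the walk: any VMA starting above the previous vm_end exposes a hole, and a final prev_end below end means the tail is unmapped. A hedged reconstruction of that shape over a sorted array instead of the mm's VMA tree; names and details are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/*
 * Walk the VMAs intersecting [start, end) in order and remember the
 * previous vm_end; a VMA whose vm_start is above prev_end reveals a
 * hole, as does running out of VMAs before reaching end.
 */
static bool contains_unmapped(const struct range *vmas, int n,
			      unsigned long start, unsigned long end)
{
	unsigned long prev_end = start;

	for (int i = 0; i < n; i++) {
		if (vmas[i].vm_end <= start || vmas[i].vm_start >= end)
			continue; /* outside the queried range */
		if (vmas[i].vm_start > prev_end)
			return true; /* hole before this VMA */
		prev_end = vmas[i].vm_end;
	}
	return prev_end < end; /* hole at the tail? */
}

int main(void)
{
	const struct range vmas[] = { { 0x1000, 0x3000 }, { 0x4000, 0x6000 } };

	printf("%d\n", contains_unmapped(vmas, 2, 0x1000, 0x3000)); /* 0 */
	printf("%d\n", contains_unmapped(vmas, 2, 0x2000, 0x5000)); /* 1: gap at 0x3000 */
	return 0;
}
```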
vma_init.c
      47  dest->vm_end = src->vm_end;   in vm_area_init_from()
mmap_lock.c
     281  if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {   in lock_vma_under_rcu()
     352  if (unlikely(from_addr >= vma->vm_end))   in lock_next_vma()
     379  vma_iter_set(vmi, IS_ERR_OR_NULL(vma) ? from_addr : vma->vm_end);   in lock_next_vma()
vma.h
     216  __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);   in vma_iter_store_gfp()
     526  vma->vm_end, vmi->mas.index, vmi->mas.last);   in vma_iter_store_overwrite()
     531  vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,   in vma_iter_store_overwrite()
     540  __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);   in vma_iter_store_overwrite()
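The vma_iter_store_*() hits pass vma->vm_end - 1 because vm_end is exclusive while maple tree ranges are inclusive [index, last]. A one-line conversion sketch; struct mt_range and vma_to_mt() are illustrative names, not the maple tree API:

```c
#include <stdio.h>

/* Toy view of a maple tree slot: inclusive first and last indices. */
struct mt_range {
	unsigned long index; /* inclusive first */
	unsigned long last;  /* inclusive last */
};

/* vm_end is one past the last byte, so the inclusive last is vm_end - 1. */
static struct mt_range vma_to_mt(unsigned long vm_start, unsigned long vm_end)
{
	struct mt_range r = { .index = vm_start, .last = vm_end - 1 };
	return r;
}

int main(void)
{
	struct mt_range r = vma_to_mt(0x1000, 0x3000);

	/* [0x1000, 0x2fff] covers exactly the bytes of [0x1000, 0x3000). */
	printf("[%#lx, %#lx]\n", r.index, r.last);
	return 0;
}
```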
pagewalk.c
     491  next = min(end, vma->vm_end);   in walk_page_range_mm_unsafe()
     492  vma = find_vma(mm, vma->vm_end);   in walk_page_range_mm_unsafe()
     709  if (start < vma->vm_start || end > vma->vm_end)   in walk_page_range_vma_unsafe()
     744  return __walk_page_range(vma->vm_start, vma->vm_end, &walk);   in walk_page_vma()
     812  err = walk_page_test(vma->vm_start, vma->vm_end, &walk);   in walk_page_mapping()
     899  if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))   in folio_walk_start()
userfaultfd.c
      27  if (dst_end > dst_vma->vm_end)   in validate_dst_vma()
     914  VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,   in uffd_wp_range()
     984  _end = min(dst_vma->vm_end, end);   in mwriteprotect_range()
    1592  if (src_start >= vma->vm_start && src_start < vma->vm_end)   in find_vmas_mm_locked()
    1622  if (src_start >= vma->vm_start && src_start < vma->vm_end) {   in uffd_move_lock()
    1806  if (src_start + len > src_vma->vm_end)   in move_pages()
    1811  if (dst_start + len > dst_vma->vm_end)   in move_pages()
    1988  if (start == vma->vm_start && end == vma->vm_end)   in userfaultfd_clear_vma()
    2043  vma_end = min(end, vma->vm_end);   in userfaultfd_register_range()
    2065  start = vma->vm_end;   in userfaultfd_register_range()
    [all …]
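validate_dst_vma() and the move_pages() checks enforce that a userfaultfd operation on [start, start + len) stays within a single VMA. A small sketch of that containment test with illustrative types:

```c
#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/*
 * The shape of the containment test: the operation may not start
 * before the mapping nor spill past its vm_end. (Overflow of
 * start + len is ignored here for brevity; the kernel validates
 * the raw arguments separately.)
 */
static bool range_in_vma(const struct range *vma,
			 unsigned long start, unsigned long len)
{
	unsigned long end = start + len;

	if (start < vma->vm_start)
		return false;
	if (end > vma->vm_end)
		return false; /* the tail hangs past the mapping */
	return true;
}

int main(void)
{
	struct range vma = { 0x1000, 0x4000 };

	printf("%d\n", range_in_vma(&vma, 0x2000, 0x1000)); /* 1 */
	printf("%d\n", range_in_vma(&vma, 0x3000, 0x2000)); /* 0 */
	return 0;
}
```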
mmap.c
     972  populate_vma_page_range(prev, addr, prev->vm_end, NULL);   in find_extend_vma_locked()
    1166  if (start + size > vma->vm_end) {   in SYSCALL_DEFINE5()
    1167  VMA_ITERATOR(vmi, mm, vma->vm_end);   in SYSCALL_DEFINE5()
    1172  if (next->vm_start != prev->vm_end)   in SYSCALL_DEFINE5()
    1181  if (start + size <= next->vm_end)   in SYSCALL_DEFINE5()
    1287  vma_iter_set(&vmi, vma->vm_end);   in exit_mmap()
    1297  vma_iter_set(&vmi, vma->vm_end);   in exit_mmap()
    1757  mpnt->vm_end, GFP_KERNEL);   in dup_mmap()
    1852  mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);   in dup_mmap()
/linux/tools/testing/vma/
vma.c
      78  vma->vm_end = end;   in alloc_vma()
     327  ASSERT_EQ(vma->vm_end, 0x3000);   in test_simple_merge()
     359  ASSERT_EQ(vma->vm_end, 0x2000);   in test_simple_modify()
     371  ASSERT_EQ(vma->vm_end, 0x1000);   in test_simple_modify()
     380  ASSERT_EQ(vma->vm_end, 0x2000);   in test_simple_modify()
     389  ASSERT_EQ(vma->vm_end, 0x3000);   in test_simple_modify()
     417  ASSERT_EQ(vma->vm_end, 0x3000);   in test_simple_expand()
     438  ASSERT_EQ(vma->vm_end, 0x1000);   in test_simple_shrink()
     530  ASSERT_EQ(vma->vm_end, 0x4000);   in __test_merge_new()
     549  ASSERT_EQ(vma->vm_end, 0x5000);   in __test_merge_new()
    [all …]
vma_internal.h
     655  unsigned long vm_end;   (member)
     995  vma->vm_end = end;   in vma_set_range()
    1099  return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;   in vma_pages()
    1470  unsigned long vm_end = vma->vm_end;   in vm_end_gap() (local)
    1473  vm_end += stack_guard_gap;   in vm_end_gap()
    1474  if (vm_end < vma->vm_end)   in vm_end_gap()
    1475  vm_end = -PAGE_SIZE;   in vm_end_gap()
    1477  return vm_end;   in vm_end_gap()
    1770  .end = vma->vm_end,   in __compat_vma_mmap()
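The vm_end_gap() hits show the stack guard gap computation: for an upward-growing stack, extend vm_end by stack_guard_gap, clamping to -PAGE_SIZE if the addition wraps. A userspace reconstruction; grows_up stands in for the kernel's VM_GROWSUP flag test, and the 256-page default matches the kernel's stack_guard_gap tunable:

```c
#include <stdio.h>

#define PAGE_SIZE 4096ul

/* Illustrative stand-in: the kernel keys this off VM_GROWSUP and a
 * tunable stack_guard_gap (256 pages by default). */
static unsigned long stack_guard_gap = 256ul * PAGE_SIZE;

struct range {
	unsigned long vm_end;
	int grows_up;
};

/*
 * The shape of vm_end_gap() from the hits above: a stack that grows
 * upward reserves a guard gap past vm_end; if adding the gap wraps the
 * address space, clamp to the highest page-aligned value instead.
 */
static unsigned long vm_end_gap(const struct range *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->grows_up) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)  /* overflow */
			vm_end = -PAGE_SIZE; /* i.e. ULONG_MAX & PAGE_MASK */
	}
	return vm_end;
}

int main(void)
{
	struct range near_top = { .vm_end = -2 * PAGE_SIZE, .grows_up = 1 };

	printf("%#lx\n", vm_end_gap(&near_top)); /* clamped, not wrapped */
	return 0;
}
```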
/linux/mm/damon/tests/
vaddr-kunit.h
      28  mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);   in __link_vmas()
      72  (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},   in damon_test_three_regions_in_vmas()
      73  (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},   in damon_test_three_regions_in_vmas()
      74  (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},   in damon_test_three_regions_in_vmas()
      75  (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},   in damon_test_three_regions_in_vmas()
      76  (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},   in damon_test_three_regions_in_vmas()
      77  (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},   in damon_test_three_regions_in_vmas()
/linux/tools/testing/selftests/bpf/progs/
iters_task_vma.c
      14  __u64 vm_end;   (member)
      35  vm_ranges[seen].vm_end = vma->vm_end;   in iter_task_vma_for_each()
/linux/fs/proc/
task_nommu.c
      35  size += region->vm_end - region->vm_start;   in task_mem()
      37  size = vma->vm_end - vma->vm_start;   in task_mem()
      46  slack = region->vm_end - vma->vm_end;   in task_mem()
      89  vsize += vma->vm_end - vma->vm_start;   in task_vsize()
     109  size += region->vm_end - region->vm_start;   in task_statm()
     150  vma->vm_end,   in nommu_vma_show()
/linux/scripts/coccinelle/api/
vma_pages.cocci
      22  * (vma->vm_end - vma->vm_start) >> PAGE_SHIFT
      32  - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
      44  (vma->vm_end@p - vma->vm_start) >> PAGE_SHIFT
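This semantic patch rewrites the open-coded page count into vma_pages(), whose body (see the vma_internal.h hit at line 1099 above) is exactly the matched expression. A before/after illustration in plain C, with struct range as a simplified stand-in for vm_area_struct:

```c
#include <stdio.h>

#define PAGE_SHIFT 12

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/* Same body as the kernel's vma_pages() helper: size in pages. */
static unsigned long vma_pages(const struct range *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

int main(void)
{
	struct range vma = { 0x1000, 0x5000 };

	/* Before the coccinelle rewrite: the open-coded expression. */
	unsigned long n_open = (vma.vm_end - vma.vm_start) >> PAGE_SHIFT;
	/* After: the named helper. */
	unsigned long n_helper = vma_pages(&vma);

	printf("%lu == %lu\n", n_open, n_helper); /* 4 == 4 */
	return 0;
}
```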
/linux/arch/s390/mm/
gmap_helpers.c
     105  zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);   in gmap_helper_discard()
     106  vmaddr = vma->vm_end;   in gmap_helper_discard()
     173  rc = walk_page_range_vma(vma, addr, vma->vm_end,   in __gmap_helper_unshare_zeropages()
/linux/include/trace/events/
fs_dax.h
      17  __field(unsigned long, vm_end)
      30  __entry->vm_end = vmf->vma->vm_end;
      47  __entry->vm_end,
/linux/drivers/media/common/videobuf2/
videobuf2-memops.c
      96  vma->vm_end);   in vb2_common_vm_open()
     114  vma->vm_end);   in vb2_common_vm_close()
/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
     416  ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,   in kvmppc_memslot_page_merge()
     423  start = vma->vm_end;   in kvmppc_memslot_page_merge()
     424  } while (end > vma->vm_end);   in kvmppc_memslot_page_merge()
     628  if (!vma || addr >= vma->vm_end) {   in kvmppc_uvmem_drop_pages()
     812  if (!vma || vma->vm_start > start || vma->vm_end < end)   in kvmppc_uv_migrate_mem_slot()
     972  if (!vma || vma->vm_start > start || vma->vm_end < end)   in kvmppc_h_svm_page_in()
    1073  if (!vma || vma->vm_start > start || vma->vm_end < end)   in kvmppc_h_svm_page_out()
/linux/arch/riscv/kvm/
mmu.c
     212  hva_t vm_end;   in kvm_arch_prepare_memory_region() (local)
     228  vm_end = min(reg_end, vma->vm_end);   in kvm_arch_prepare_memory_region()
     237  hva = vm_end;   in kvm_arch_prepare_memory_region()
/linux/drivers/infiniband/hw/hfi1/
file_ops.c
     315  vma->vm_end - vma->vm_start, vma->vm_flags);   in mmap_cdbg()
     415  if ((vma->vm_end - vma->vm_start) != memlen) {   in hfi1_file_mmap()
     417  (vma->vm_end - vma->vm_start), memlen);   in hfi1_file_mmap()
     434  vm_end_save = vma->vm_end;   in hfi1_file_mmap()
     435  vma->vm_end = vma->vm_start;   in hfi1_file_mmap()
     440  vma->vm_end += memlen;   in hfi1_file_mmap()
     447  vma->vm_end = vm_end_save;   in hfi1_file_mmap()
     453  vma->vm_end = vm_end_save;   in hfi1_file_mmap()
     553  if ((vma->vm_end - vma->vm_start) != memlen) {   in hfi1_file_mmap()
     556  (vma->vm_end - vma->vm_start), memlen);   in hfi1_file_mmap()
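The repeated hfi1_file_mmap() check shows a common driver pattern: the user-requested mapping length (vm_end - vm_start) must exactly match the size of the object being mapped, or the mmap is rejected with -EINVAL. A sketch of that validation with illustrative types; check_mmap_len() is not a real kernel helper:

```c
#include <stdio.h>

struct range {
	unsigned long vm_start;
	unsigned long vm_end; /* exclusive */
};

/*
 * The recurring driver check: the length the user asked mmap() for
 * must equal the size of the backing object, since the driver maps
 * the whole object at a fixed offset.
 */
static int check_mmap_len(const struct range *vma, unsigned long memlen)
{
	if ((vma->vm_end - vma->vm_start) != memlen) {
		fprintf(stderr, "requested %#lx, object is %#lx\n",
			vma->vm_end - vma->vm_start, memlen);
		return -22; /* -EINVAL */
	}
	return 0;
}

int main(void)
{
	struct range vma = { 0x1000, 0x3000 };

	printf("%d\n", check_mmap_len(&vma, 0x2000)); /*   0 */
	printf("%d\n", check_mmap_len(&vma, 0x1000)); /* -22 */
	return 0;
}
```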
/linux/drivers/sbus/char/
flash.c
      66  if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)   in flash_mmap()
      67  size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));   in flash_mmap()