
Searched refs:vm_flags (Results 1 – 25 of 242) sorted by relevance


/linux/tools/testing/vma/
vma.c
70 vm_flags_t vm_flags) in alloc_vma() argument
80 vm_flags_reset(vma, vm_flags); in alloc_vma()
108 vm_flags_t vm_flags) in alloc_and_link_vma() argument
110 struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags); in alloc_and_link_vma()
177 unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags) in vmg_set_range() argument
189 vmg->vm_flags = vm_flags; in vmg_set_range()
200 unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags, in vmg_set_range_anon_vma() argument
203 vmg_set_range(vmg, start, end, pgoff, vm_flags); in vmg_set_range_anon_vma()
216 pgoff_t pgoff, vm_flags_t vm_flags, in try_merge_new_vma() argument
221 vmg_set_range(vmg, start, end, pgoff, vm_flags); in try_merge_new_vma()
[all …]
vma_internal.h
621 vm_flags_t vm_flags; member
668 const vm_flags_t vm_flags; member
872 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) in vm_get_page_prot() argument
874 return __pgprot(vm_flags); in vm_get_page_prot()
877 static inline bool is_shared_maywrite(vm_flags_t vm_flags) in is_shared_maywrite() argument
879 return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == in is_shared_maywrite()
885 return is_shared_maywrite(vma->vm_flags); in vma_is_shared_maywrite()
1321 vm_flags_t vm_flags) in khugepaged_enter_vma() argument
1416 vm_flags_t vm_flags = vma->vm_flags; in vma_set_page_prot() local
1420 vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags)); in vma_set_page_prot()
[all …]
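A note on the pattern: the is_shared_maywrite() hit above uses a standard bit-mask idiom. AND the flag word with a multi-bit mask, then compare the result against the whole mask; the test passes only when every bit in the mask is set. A minimal userspace sketch of the idiom follows (the flag values match the usual definitions in include/linux/mm.h, but the program is illustrative only).

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

/* Values as commonly defined in include/linux/mm.h; shown for illustration. */
#define VM_SHARED   0x00000008UL
#define VM_MAYWRITE 0x00000020UL

/* True only when BOTH bits are set: AND with the mask, compare to the mask. */
static bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
	       (VM_SHARED | VM_MAYWRITE);
}

int main(void)
{
	printf("%d\n", is_shared_maywrite(VM_SHARED));                /* 0 */
	printf("%d\n", is_shared_maywrite(VM_SHARED | VM_MAYWRITE)); /* 1 */
	return 0;
}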
/linux/mm/
mmap.c
83 vm_flags_t vm_flags = vma->vm_flags; in vma_set_page_prot() local
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
88 vm_flags &= ~VM_SHARED; in vma_set_page_prot()
89 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); in vma_set_page_prot()
228 bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags, in mlock_future_ok() argument
233 if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) in mlock_future_ok()
336 unsigned long flags, vm_flags_t vm_flags, in do_mmap() argument
400 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | in do_mmap()
406 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); in do_mmap()
419 if (!mlock_future_ok(mm, vm_flags, len)) in do_mmap()
[all …]
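In the do_mmap() hit at line 400 above, the final vm_flags word is assembled by OR-ing independent contributions: bits derived from the mmap() prot argument, bits derived from the mmap() flags, and the mm's default flags. A simplified standalone sketch of that composition follows; the calc_vm_prot_bits() here is a reduced stand-in for the real helper in include/linux/mman.h.

#include <stdio.h>

typedef unsigned long vm_flags_t;

/* Illustrative values; the real ones live in the uapi and mm headers. */
#define PROT_READ   0x1
#define PROT_WRITE  0x2
#define VM_READ     0x00000001UL
#define VM_WRITE    0x00000002UL
#define VM_MAYREAD  0x00000010UL
#define VM_MAYWRITE 0x00000020UL

/* Reduced stand-in for calc_vm_prot_bits(): map PROT_* to VM_*. */
static vm_flags_t calc_vm_prot_bits(unsigned long prot)
{
	return ((prot & PROT_READ)  ? VM_READ  : 0) |
	       ((prot & PROT_WRITE) ? VM_WRITE : 0);
}

int main(void)
{
	/* do_mmap() ORs per-source contributions into a single flag word. */
	vm_flags_t vm_flags = calc_vm_prot_bits(PROT_READ | PROT_WRITE) |
			      VM_MAYREAD | VM_MAYWRITE;

	printf("vm_flags = %#lx\n", vm_flags); /* 0x33 */
	return 0;
}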
vma.c
18 vm_flags_t vm_flags; member
50 .vm_flags = vm_flags_, \
61 .vm_flags = (map_)->vm_flags, \
92 if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_IGNORE_MERGE) in is_mergeable_vma()
803 vm_flags_t sticky_flags = vmg->vm_flags & VM_STICKY; in vma_merge_existing_range()
840 if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side)) in vma_merge_existing_range()
896 sticky_flags |= (next->vm_flags & VM_STICKY); in vma_merge_existing_range()
902 sticky_flags |= (prev->vm_flags & VM_STICKY); in vma_merge_existing_range()
973 khugepaged_enter_vma(vmg->target, vmg->vm_flags); in vma_merge_existing_range()
1055 if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next)) in vma_merge_new_range()
[all …]
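The is_mergeable_vma() hit at line 92 above uses XOR to compute the set of bits that differ between two flag words, then masks away the bits that are permitted to differ: two VMAs are flag-compatible only if nothing outside VM_IGNORE_MERGE differs. A sketch of the idiom follows; the flag values and the contents of VM_IGNORE_MERGE are placeholders, not the kernel's.

#include <stdbool.h>

typedef unsigned long vm_flags_t;

/* Placeholder values; the real definitions live in the mm headers. */
#define VM_READ         0x1UL
#define VM_WRITE        0x2UL
#define VM_SOFTDIRTY    0x100UL
#define VM_IGNORE_MERGE VM_SOFTDIRTY /* bits allowed to differ (assumed) */

static bool flags_mergeable(vm_flags_t a, vm_flags_t b)
{
	/* XOR yields the differing bits; mask off the tolerated ones. */
	return ((a ^ b) & ~VM_IGNORE_MERGE) == 0;
}

int main(void)
{
	bool ok  = flags_mergeable(VM_READ | VM_SOFTDIRTY, VM_READ); /* true  */
	bool bad = flags_mergeable(VM_READ | VM_WRITE, VM_READ);     /* false */
	return (ok && !bad) ? 0 : 1;
}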
vma.h
101 vm_flags_t vm_flags; member
173 .vm_flags = vm_flags_, \
187 .vm_flags = vma_->vm_flags, \
243 if (desc->vm_flags != vma->vm_flags) in set_vma_from_desc()
244 vm_flags_set(vma, desc->vm_flags); in set_vma_from_desc()
366 unsigned long start, unsigned long end, vm_flags_t vm_flags,
394 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
411 if (vma->vm_flags & VM_SHARED) in vma_wants_manual_pte_write_upgrade()
413 return !!(vma->vm_flags & VM_WRITE); in vma_wants_manual_pte_write_upgrade()
417 static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags) in vm_pgprot_modify() argument
[all …]
nommu.c
130 pgprot_t prot, unsigned long vm_flags, int node, in __vmalloc_node_range_noprof() argument
545 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
853 vm_flags_t vm_flags; in determine_vm_flags() local
855 vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags); in determine_vm_flags()
862 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in determine_vm_flags()
866 vm_flags |= (capabilities & NOMMU_VMFLAGS); in determine_vm_flags()
868 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in determine_vm_flags()
879 vm_flags |= VM_MAYOVERLAY; in determine_vm_flags()
882 vm_flags |= VM_SHARED | VM_MAYSHARE | in determine_vm_flags()
886 return vm_flags; in determine_vm_flags()
[all …]
userfaultfd.c
51 else if (!(vma->vm_flags & VM_SHARED) && in find_vma_and_prepare_anon()
80 if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma)) in uffd_lock_vma()
176 bool writable = dst_vma->vm_flags & VM_WRITE; in mfill_atomic_install_pte()
177 bool vm_shared = dst_vma->vm_flags & VM_SHARED; in mfill_atomic_install_pte()
687 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
759 dst_vma->vm_flags & VM_SHARED)) in mfill_atomic()
766 if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP)) in mfill_atomic()
1536 return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB | in vma_move_compatible()
1545 if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) || in validate_move_areas()
1550 if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED)) in validate_move_areas()
[all …]
mseal.c
71 if (!(vma->vm_flags & VM_SEALED)) { in mseal_apply()
72 vm_flags_t vm_flags = vma->vm_flags | VM_SEALED; in mseal_apply() local
75 curr_end, &vm_flags); in mseal_apply()
execmem.c
29 pgprot_t pgprot, unsigned long vm_flags) in execmem_vmalloc() argument
39 vm_flags |= VM_DEFER_KMEMLEAK; in execmem_vmalloc()
42 pgprot, vm_flags, NUMA_NO_NODE, in execmem_vmalloc()
48 pgprot, vm_flags, NUMA_NO_NODE, in execmem_vmalloc()
82 pgprot_t pgprot, unsigned long vm_flags) in execmem_vmalloc() argument
283 unsigned long vm_flags = VM_ALLOW_HUGE_VMAP; in execmem_cache_populate() local
290 p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags); in execmem_cache_populate()
293 p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags); in execmem_cache_populate()
465 unsigned long vm_flags = VM_FLUSH_RESET_PERMS; in execmem_alloc() local
474 p = execmem_vmalloc(range, size, pgprot, vm_flags); in execmem_alloc()
mlock.c
332 if (!(vma->vm_flags & VM_LOCKED)) in allow_mlock_munlock()
371 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
396 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
473 vm_flags_t oldflags = vma->vm_flags; in mlock_fixup()
545 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_vma_lock_flags()
586 if (vma->vm_flags & VM_LOCKED) { in count_mm_mlocked_page_nr()
666 vm_flags_t vm_flags = VM_LOCKED; in SYSCALL_DEFINE3() local
672 vm_flags |= VM_LOCKONFAULT; in SYSCALL_DEFINE3()
674 return do_mlock(start, len, vm_flags); in SYSCALL_DEFINE3()
731 newflags = vma->vm_flags & ~VM_LOCKED_MASK; in apply_mlockall_flags()
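Both apply_vma_lock_flags() and apply_mlockall_flags() above start from vma->vm_flags & ~VM_LOCKED_MASK, clearing the entire lock-related field before OR-ing the new state back in, so a stale VM_LOCKONFAULT bit cannot survive a transition to a plain mlock(). The same clear-then-set pattern as a standalone sketch; the flag values are placeholders.

#include <stdio.h>

typedef unsigned long vm_flags_t;

/* Placeholder values mirroring VM_LOCKED / VM_LOCKONFAULT. */
#define VM_LOCKED      0x2000UL
#define VM_LOCKONFAULT 0x4000UL
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)

int main(void)
{
	/* State left by a hypothetical earlier mlock2(..., MLOCK_ONFAULT). */
	vm_flags_t flags = VM_LOCKED | VM_LOCKONFAULT;

	/* Transition to a plain mlock(): clear the whole field, then set. */
	vm_flags_t newflags = flags & ~VM_LOCKED_MASK;
	newflags |= VM_LOCKED;

	printf("old=%#lx new=%#lx\n", flags, newflags); /* 0x6000 -> 0x2000 */
	return 0;
}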
mremap.c
958 if (vma->vm_flags & VM_MAYSHARE) in vrm_set_new_addr()
980 if (!(vrm->vma->vm_flags & VM_ACCOUNT)) in vrm_calc_charge()
1007 if (!(vrm->vma->vm_flags & VM_ACCOUNT)) in vrm_uncharge()
1026 vm_stat_account(mm, vma->vm_flags, pages); in vrm_stat_account()
1027 if (vma->vm_flags & VM_LOCKED) in vrm_stat_account()
1041 vm_flags_t dummy = vma->vm_flags; in prep_move_vma()
1101 bool accountable_move = (vma->vm_flags & VM_ACCOUNT) && in unmap_source_vma()
1405 vm_flags_t vm_flags = vrm->vma->vm_flags; in mremap_to() local
1408 if (!may_expand_vm(mm, vm_flags, pages)) in mremap_to()
1694 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in check_prep_vma()
[all …]
/linux/arch/powerpc/include/asm/book3s/64/
hash-pkey.h
8 static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags) in hash__vmflag_to_pte_pkey_bits() argument
10 return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) | in hash__vmflag_to_pte_pkey_bits()
11 ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) | in hash__vmflag_to_pte_pkey_bits()
12 ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) | in hash__vmflag_to_pte_pkey_bits()
13 ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) | in hash__vmflag_to_pte_pkey_bits()
14 ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL)); in hash__vmflag_to_pte_pkey_bits()
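hash__vmflag_to_pte_pkey_bits() above translates each of the five VM_PKEY_BIT* flags into its H_PTE_PKEY_BIT* counterpart one bit at a time, because the two encodings occupy different bit positions and a single shift cannot map them. A standalone sketch of the same per-bit translation, shortened to two keys and using invented positions on both sides:

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions for illustration; the real ones differ. */
#define VM_PKEY_BIT0    (1ULL << 32)
#define VM_PKEY_BIT1    (1ULL << 33)
#define H_PTE_PKEY_BIT0 (1ULL << 5)
#define H_PTE_PKEY_BIT1 (1ULL << 6)

/* Per-bit translation: test each source bit, emit the matching target bit. */
static uint64_t vmflag_to_pte_pkey_bits(uint64_t vm_flags)
{
	return ((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0) |
	       ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       vmflag_to_pte_pkey_bits(VM_PKEY_BIT1)); /* 0x40 */
	return 0;
}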
/linux/include/linux/
userfaultfd_k.h
164 return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_huge_pmd_share()
176 return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_fault_around()
181 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
186 return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
191 return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
208 return vma->vm_flags & __VM_UFFD_FLAGS; in userfaultfd_armed()
212 vm_flags_t vm_flags, in vma_can_userfault() argument
215 vm_flags &= __VM_UFFD_FLAGS; in vma_can_userfault()
217 if (vma->vm_flags & VM_DROPPABLE) in vma_can_userfault()
220 if ((vm_flags & VM_UFFD_MINOR) && in vma_can_userfault()
[all …]
hugetlb_inline.h
9 static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags) in is_vm_hugetlb_flags() argument
11 return !!(vm_flags & VM_HUGETLB); in is_vm_hugetlb_flags()
16 static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags) in is_vm_hugetlb_flags() argument
25 return is_vm_hugetlb_flags(vma->vm_flags); in is_vm_hugetlb_page()
huge_mm.h
104 #define thp_vma_allowable_order(vma, vm_flags, type, order) \ argument
105 (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))
269 vm_flags_t vm_flags,
290 vm_flags_t vm_flags, in thp_vma_allowable_orders() argument
301 if (vm_flags & VM_HUGEPAGE) in thp_vma_allowable_orders()
304 ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) in thp_vma_allowable_orders()
312 return __thp_vma_allowable_orders(vma, vm_flags, type, orders); in thp_vma_allowable_orders()
332 vm_flags_t vm_flags, bool forced_collapse) in vma_thp_disabled() argument
335 if (vm_flags & VM_NOHUGEPAGE) in vma_thp_disabled()
344 if (vm_flags & VM_HUGEPAGE) in vma_thp_disabled()
[all …]
ksm.h
19 unsigned long end, int advice, vm_flags_t *vm_flags);
21 vm_flags_t vm_flags);
107 const struct file *file, vm_flags_t vm_flags) in ksm_vma_flags() argument
109 return vm_flags; in ksm_vma_flags()
142 unsigned long end, int advice, vm_flags_t *vm_flags) in ksm_madvise() argument
/linux/include/trace/events/
fs_dax.h
18 __field(vm_flags_t, vm_flags)
31 __entry->vm_flags = vmf->vma->vm_flags;
43 __entry->vm_flags & VM_SHARED ? "shared" : "private",
70 __field(vm_flags_t, vm_flags)
79 __entry->vm_flags = vmf->vma->vm_flags;
89 __entry->vm_flags & VM_SHARED ? "shared" : "private",
110 __field(vm_flags_t, vm_flags)
120 __entry->vm_flags = vmf->vma->vm_flags;
130 __entry->vm_flags & VM_SHARED ? "shared" : "private",
/linux/arch/arm64/mm/
fault.c
546 if (!(vma->vm_flags & VM_SHADOW_STACK)) in is_invalid_gcs_access()
548 } else if (unlikely(vma->vm_flags & VM_SHADOW_STACK)) { in is_invalid_gcs_access()
562 vm_flags_t vm_flags; in do_page_fault() local
590 vm_flags = VM_EXEC; in do_page_fault()
598 vm_flags = VM_WRITE; in do_page_fault()
602 vm_flags = VM_WRITE; in do_page_fault()
606 vm_flags = VM_READ; in do_page_fault()
608 vm_flags |= VM_WRITE; in do_page_fault()
611 vm_flags |= VM_EXEC; in do_page_fault()
640 if (!(vma->vm_flags & vm_flags)) { in do_page_fault()
[all …]
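The arm64 do_page_fault() above first derives the permission the faulting access requires (VM_EXEC for an instruction fetch, VM_WRITE for a store, VM_READ otherwise) and then, once the VMA is found, rejects the access with the single test !(vma->vm_flags & vm_flags). The same accumulate-then-test shape in a freestanding sketch; the fault-kind decoding is invented:

#include <stdbool.h>

typedef unsigned long vm_flags_t;

/* Illustrative values for the access bits. */
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

enum fault_kind { FAULT_READ, FAULT_WRITE, FAULT_EXEC }; /* invented */

static bool access_permitted(enum fault_kind kind, vm_flags_t vma_flags)
{
	vm_flags_t required;

	/* Step 1: accumulate the flags this access needs. */
	switch (kind) {
	case FAULT_EXEC:  required = VM_EXEC;  break;
	case FAULT_WRITE: required = VM_WRITE; break;
	default:          required = VM_READ;  break;
	}

	/* Step 2: one test, mirroring !(vma->vm_flags & vm_flags). */
	return (vma_flags & required) != 0;
}

int main(void)
{
	return access_permitted(FAULT_WRITE, VM_READ | VM_WRITE) ? 0 : 1;
}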
/linux/arch/loongarch/mm/
fault.c
228 if (!(vma->vm_flags & VM_WRITE)) { in __do_page_fault()
235 if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) { in __do_page_fault()
241 if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) { in __do_page_fault()
295 if (!(vma->vm_flags & VM_WRITE)) in __do_page_fault()
298 if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) in __do_page_fault()
300 if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) in __do_page_fault()
/linux/arch/nios2/mm/
cacheflush.c
90 if (!(vma->vm_flags & VM_MAYSHARE)) in flush_aliases()
138 if (vma == NULL || (vma->vm_flags & VM_EXEC)) in flush_cache_range()
159 if (vma->vm_flags & VM_EXEC) in flush_cache_page()
236 if (vma->vm_flags & VM_EXEC) in update_mmu_cache_range()
268 if (vma->vm_flags & VM_EXEC) in copy_from_user_page()
279 if (vma->vm_flags & VM_EXEC) in copy_to_user_page()
/linux/arch/hexagon/mm/
vm_fault.c
70 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
74 if (!(vma->vm_flags & VM_READ)) in do_page_fault()
78 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
/linux/drivers/sbus/char/
flash.c
44 if ((vma->vm_flags & VM_READ) && in flash_mmap()
45 (vma->vm_flags & VM_WRITE)) { in flash_mmap()
49 if (vma->vm_flags & VM_READ) { in flash_mmap()
52 } else if (vma->vm_flags & VM_WRITE) { in flash_mmap()
/linux/arch/arm/mm/
fault.c
300 vm_flags_t vm_flags = VM_ACCESS_FLAGS; in do_page_fault() local
328 vm_flags = VM_WRITE; in do_page_fault()
332 vm_flags = VM_EXEC; in do_page_fault()
356 if (!(vma->vm_flags & vm_flags)) { in do_page_fault()
395 if (!(vma->vm_flags & vm_flags)) { in do_page_fault()
/linux/arch/sparc/mm/
fault_64.c
352 (vma->vm_flags & VM_WRITE) != 0) { in do_sparc64_fault()
373 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_sparc64_fault()
402 if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) { in do_sparc64_fault()
410 if (!(vma->vm_flags & VM_WRITE)) in do_sparc64_fault()
417 (vma->vm_flags & VM_EXEC) != 0 && in do_sparc64_fault()
425 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_sparc64_fault()
/linux/include/linux/sched/
mm.h
186 unsigned long flags, vm_flags_t vm_flags);
201 vm_flags_t vm_flags);
206 unsigned long flags, vm_flags_t vm_flags);
210 unsigned long flags, vm_flags_t vm_flags);
