Lines matching references to vm_flags (source line number, matched code, enclosing function):
83 vm_flags_t vm_flags = vma->vm_flags; in vma_set_page_prot() local
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
88 vm_flags &= ~VM_SHARED; in vma_set_page_prot()
89 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); in vma_set_page_prot()
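The vma_set_page_prot() hits (lines 83-89) derive the page protection twice: once from the VMA's real flags, and again with VM_SHARED cleared when the mapping wants write notification. A minimal compilable model of that double derivation, using simplified stand-ins for vm_pgprot_modify() and vma_wants_writenotify() (nothing below is kernel code):

    /* Simplified model of vma_set_page_prot(); all names and bit values
     * here are illustrative mocks. */
    #include <stdbool.h>

    typedef unsigned long vm_flags_t;
    typedef unsigned long pgprot_t;

    #define VM_SHARED 0x0008UL              /* illustrative bit value */

    /* stand-in for vm_pgprot_modify(): fold flag bits into the protection */
    static pgprot_t pgprot_modify_sketch(pgprot_t prot, vm_flags_t vm_flags)
    {
        return (prot & ~0xffUL) | (vm_flags & 0xffUL);
    }

    static pgprot_t set_page_prot_sketch(vm_flags_t vm_flags, pgprot_t cur,
                                         bool wants_writenotify)
    {
        pgprot_t vm_page_prot = pgprot_modify_sketch(cur, vm_flags);

        if (wants_writenotify) {
            /* recompute without VM_SHARED so shared writable pages start
             * write-protected and dirtying is caught by faults */
            vm_flags &= ~VM_SHARED;
            vm_page_prot = pgprot_modify_sketch(vm_page_prot, vm_flags);
        }
        return vm_page_prot;
    }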
228 bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags, in mlock_future_ok() argument
233 if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) in mlock_future_ok()
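mlock_future_ok() (line 228) gates new VM_LOCKED memory against the memlock limit: unlocked mappings and CAP_IPC_LOCK holders always pass, everything else must fit under the limit. The shape of that check, with the capability test and the per-mm locked-byte accounting reduced to plain parameters:

    /* Reduced model of the mlock_future_ok() check; the limit, the current
     * locked-byte count and the capability flag are plain parameters here. */
    #include <stdbool.h>

    typedef unsigned long vm_flags_t;

    #define VM_LOCKED 0x2000UL              /* illustrative bit value */

    static bool mlock_future_ok_sketch(vm_flags_t vm_flags, unsigned long bytes,
                                       unsigned long locked_bytes,
                                       unsigned long lock_limit_bytes,
                                       bool cap_ipc_lock)
    {
        /* not VM_LOCKED, or privileged: always allowed */
        if (!(vm_flags & VM_LOCKED) || cap_ipc_lock)
            return true;

        /* otherwise the new range must fit under RLIMIT_MEMLOCK */
        return locked_bytes + bytes <= lock_limit_bytes;
    }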
336 unsigned long flags, vm_flags_t vm_flags, in do_mmap() argument
400 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | in do_mmap()
406 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); in do_mmap()
419 if (!mlock_future_ok(mm, vm_flags, len)) in do_mmap()
462 vm_flags |= VM_SHARED | VM_MAYSHARE; in do_mmap()
464 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); in do_mmap()
470 if (vm_flags & VM_EXEC) in do_mmap()
472 vm_flags &= ~VM_MAYEXEC; in do_mmap()
477 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) in do_mmap()
489 err = memfd_check_seals_mmap(file, &vm_flags); in do_mmap()
495 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) in do_mmap()
501 vm_flags |= VM_SHARED | VM_MAYSHARE; in do_mmap()
516 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) in do_mmap()
519 vm_flags |= VM_DROPPABLE; in do_mmap()
525 vm_flags |= VM_NORESERVE; in do_mmap()
531 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; in do_mmap()
551 vm_flags |= VM_NORESERVE; in do_mmap()
555 vm_flags |= VM_NORESERVE; in do_mmap()
558 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); in do_mmap()
560 ((vm_flags & VM_LOCKED) || in do_mmap()
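The do_mmap() hits (lines 400-560) are all about assembling vm_flags: the PROT_* bits and the VM_MAY* defaults come in first, then MAP_SHARED/MAP_PRIVATE and the backing file adjust VM_SHARED/VM_MAYSHARE/VM_MAYWRITE/VM_MAYEXEC, and VM_NORESERVE is OR-ed in when overcommit accounting is skipped. A compressed, userspace-compilable sketch of that assembly; the constants are local mocks, and mm->def_flags, calc_vm_flag_bits(), MAP_DROPPABLE and the memfd seal check are deliberately left out:

    /* Illustrative reconstruction of the vm_flags assembly in do_mmap();
     * all constants below are mocks with kernel-like names. */
    #include <stdbool.h>
    #include <sys/mman.h>                   /* PROT_*, MAP_* */

    typedef unsigned long vm_flags_t;

    #define VM_READ      0x0001UL
    #define VM_WRITE     0x0002UL
    #define VM_EXEC      0x0004UL
    #define VM_SHARED    0x0008UL
    #define VM_MAYREAD   0x0010UL
    #define VM_MAYWRITE  0x0020UL
    #define VM_MAYEXEC   0x0040UL
    #define VM_MAYSHARE  0x0080UL
    #define VM_NORESERVE 0x0100UL           /* illustrative position */

    /* rough stand-in for calc_vm_prot_bits() */
    static vm_flags_t calc_prot_bits_sketch(int prot)
    {
        vm_flags_t f = 0;

        if (prot & PROT_READ)
            f |= VM_READ;
        if (prot & PROT_WRITE)
            f |= VM_WRITE;
        if (prot & PROT_EXEC)
            f |= VM_EXEC;
        return f;
    }

    /* returns the assembled flags, or 0 to signal "would be -EPERM" */
    static vm_flags_t build_vm_flags_sketch(int prot, int flags,
                                            bool file_writable,
                                            bool file_noexec,
                                            bool skip_accounting)
    {
        vm_flags_t vm_flags = calc_prot_bits_sketch(prot) |
                              VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (flags & MAP_SHARED) {
            vm_flags |= VM_SHARED | VM_MAYSHARE;
            /* a read-only file can never become writable through this VMA */
            if (!file_writable)
                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
        }
        if (file_noexec) {
            if (vm_flags & VM_EXEC)
                return 0;                   /* exec mapping on a noexec path */
            vm_flags &= ~VM_MAYEXEC;
        }
        if (skip_accounting)
            vm_flags |= VM_NORESERVE;

        return vm_flags;
    }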
646 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags) in stack_guard_placement() argument
648 if (vm_flags & VM_SHADOW_STACK) in stack_guard_placement()
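stack_guard_placement() (line 646) only widens the search gap for shadow-stack mappings; everything else gets no extra gap. In isolation:

    /* Sketch of stack_guard_placement(): shadow stacks ask for one extra
     * guard page in front of the mapping, other mappings ask for none. */
    typedef unsigned long vm_flags_t;

    #define SKETCH_PAGE_SIZE 4096UL
    #define VM_SHADOW_STACK  0x1000000UL    /* illustrative bit value */

    static unsigned long stack_guard_placement_sketch(vm_flags_t vm_flags)
    {
        if (vm_flags & VM_SHADOW_STACK)
            return SKETCH_PAGE_SIZE;
        return 0;
    }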
690 unsigned long flags, vm_flags_t vm_flags) in generic_get_unmapped_area() argument
715 info.start_gap = stack_guard_placement(vm_flags); in generic_get_unmapped_area()
725 unsigned long flags, vm_flags_t vm_flags) in arch_get_unmapped_area() argument
728 vm_flags); in arch_get_unmapped_area()
739 unsigned long flags, vm_flags_t vm_flags) in generic_get_unmapped_area_topdown() argument
767 info.start_gap = stack_guard_placement(vm_flags); in generic_get_unmapped_area_topdown()
793 unsigned long flags, vm_flags_t vm_flags) in arch_get_unmapped_area_topdown() argument
796 vm_flags); in arch_get_unmapped_area_topdown()
802 unsigned long flags, vm_flags_t vm_flags) in mm_get_unmapped_area_vmflags() argument
806 flags, vm_flags); in mm_get_unmapped_area_vmflags()
807 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); in mm_get_unmapped_area_vmflags()
812 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) in __get_unmapped_area() argument
848 pgoff, flags, vm_flags); in __get_unmapped_area()
851 pgoff, flags, vm_flags); in __get_unmapped_area()
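Lines 690-851 are the search and dispatch layer: vm_flags is threaded from __get_unmapped_area() through mm_get_unmapped_area_vmflags() into the bottom-up or top-down hole search, where it only matters as the extra start gap set from stack_guard_placement(). A reduced model of that dispatch, with both searches mocked out and a boolean standing in for the mm's top-down test:

    /* Reduced model of the unmapped-area dispatch; the two *_sketch
     * searches stand in for arch_get_unmapped_area{,_topdown}(). */
    #include <stdbool.h>

    typedef unsigned long vm_flags_t;

    #define VM_SHADOW_STACK  0x1000000UL    /* illustrative, as above */
    #define SKETCH_PAGE_SIZE 4096UL

    static unsigned long bottomup_search_sketch(unsigned long len,
                                                unsigned long start_gap)
    {
        (void)len;
        (void)start_gap;
        return 0x10000UL;                   /* pretend: lowest suitable hole */
    }

    static unsigned long topdown_search_sketch(unsigned long len,
                                               unsigned long start_gap)
    {
        (void)start_gap;
        return 0x7f0000000000UL - len;      /* pretend: highest suitable hole */
    }

    static unsigned long get_unmapped_area_sketch(bool topdown, unsigned long len,
                                                  vm_flags_t vm_flags)
    {
        /* vm_flags only reaches the search as the extra start gap here */
        unsigned long start_gap =
            (vm_flags & VM_SHADOW_STACK) ? SKETCH_PAGE_SIZE : 0;

        return topdown ? topdown_search_sketch(len, start_gap)
                       : bottomup_search_sketch(len, start_gap);
    }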
971 if (prev->vm_flags & VM_LOCKED) in find_extend_vma_locked()
995 if (vma->vm_flags & VM_LOCKED) in find_extend_vma_locked()
1093 vm_flags_t vm_flags; in SYSCALL_DEFINE5() local
1120 if (!vma || !(vma->vm_flags & VM_SHARED)) { in SYSCALL_DEFINE5()
1125 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
1126 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
1127 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
1131 if (vma->vm_flags & VM_LOCKED) in SYSCALL_DEFINE5()
1135 vm_flags = vma->vm_flags; in SYSCALL_DEFINE5()
1161 if (vma->vm_flags != vm_flags) in SYSCALL_DEFINE5()
1178 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
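The SYSCALL_DEFINE5() hits (lines 1093-1178, the remap_file_pages() path in current trees) go the other way: the syscall requires an existing VM_SHARED mapping, rebuilds a PROT_* mask from the VMA's vm_flags, remembers whether it was VM_LOCKED, and later insists that any neighbouring VMAs carry identical flags. The per-bit translation, lifted into a standalone helper (constants are local mocks):

    /* Standalone version of the vm_flags -> PROT_* translation used by
     * the syscall above. */
    #include <sys/mman.h>                   /* PROT_* */

    typedef unsigned long vm_flags_t;

    #define VM_READ  0x0001UL
    #define VM_WRITE 0x0002UL
    #define VM_EXEC  0x0004UL

    static int vm_flags_to_prot_sketch(vm_flags_t vm_flags)
    {
        int prot = 0;

        prot |= (vm_flags & VM_READ)  ? PROT_READ  : 0;
        prot |= (vm_flags & VM_WRITE) ? PROT_WRITE : 0;
        prot |= (vm_flags & VM_EXEC)  ? PROT_EXEC  : 0;
        return prot;
    }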
1203 int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags) in vm_brk_flags() argument
1220 if ((vm_flags & (~VM_EXEC)) != 0) in vm_brk_flags()
1235 ret = do_brk_flags(&vmi, vma, addr, len, vm_flags); in vm_brk_flags()
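vm_brk_flags() (line 1203) accepts caller-supplied flags but rejects anything other than VM_EXEC before extending the brk-style mapping. That guard on its own:

    /* The flag filter from vm_brk_flags() as a standalone check. */
    #include <stdbool.h>

    typedef unsigned long vm_flags_t;

    #define VM_EXEC 0x0004UL                /* illustrative bit value */

    static bool brk_flags_valid_sketch(vm_flags_t vm_flags)
    {
        /* anything outside VM_EXEC is refused (-EINVAL in the real code) */
        return (vm_flags & ~VM_EXEC) == 0;
    }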
1299 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
1440 vm_flags_t vm_flags, void *priv, in __install_special_mapping() argument
1451 vm_flags |= mm->def_flags | VM_DONTEXPAND; in __install_special_mapping()
1453 vm_flags |= VM_SOFTDIRTY; in __install_special_mapping()
1454 vm_flags_init(vma, vm_flags & ~VM_LOCKED_MASK); in __install_special_mapping()
1455 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
1464 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
1494 vm_flags_t vm_flags, const struct vm_special_mapping *spec) in _install_special_mapping() argument
1496 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
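For special mappings such as the vDSO, lines 1451-1455 show the flags being normalised before the VMA is installed: the mm's default flags plus VM_DONTEXPAND are OR-ed in, VM_SOFTDIRTY is added when soft-dirty tracking is enabled, and any mlock bits are stripped before the page protection is derived from the result. The same transformation as a standalone function (bit values are illustrative):

    /* Standalone model of the flag adjustment in __install_special_mapping(). */
    #include <stdbool.h>

    typedef unsigned long vm_flags_t;

    #define VM_DONTEXPAND  0x00040000UL     /* illustrative bit values */
    #define VM_SOFTDIRTY   0x08000000UL
    #define VM_LOCKED_MASK 0x00002000UL     /* VM_LOCKED and related bits */

    static vm_flags_t special_mapping_flags_sketch(vm_flags_t vm_flags,
                                                   vm_flags_t def_flags,
                                                   bool soft_dirty_enabled)
    {
        vm_flags |= def_flags | VM_DONTEXPAND;
        if (soft_dirty_enabled)
            vm_flags |= VM_SOFTDIRTY;

        /* special mappings never keep mlock state */
        return vm_flags & ~VM_LOCKED_MASK;
    }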
1705 if (!(new_vma->vm_flags & VM_GROWSDOWN)) in mmap_read_lock_maybe_expand()
1755 if (mpnt->vm_flags & VM_DONTCOPY) { in dup_mmap()
1761 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); in dup_mmap()
1765 if (mpnt->vm_flags & VM_ACCOUNT) { in dup_mmap()
1783 if (tmp->vm_flags & VM_WIPEONFORK) { in dup_mmap()
1826 if (!(tmp->vm_flags & VM_WIPEONFORK)) in dup_mmap()
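Finally, the dup_mmap() hits (lines 1755-1826) encode fork-time policy per VMA: VM_DONTCOPY mappings are skipped entirely (their statistics are un-accounted), VM_ACCOUNT mappings are re-charged in the child, and VM_WIPEONFORK mappings are created fresh in the child rather than having their page tables copied. A compact decision helper capturing that policy; the enum and the helper are invented for illustration, not a kernel API:

    /* Invented summary of the per-VMA fork decisions visible in dup_mmap(). */
    typedef unsigned long vm_flags_t;

    #define VM_DONTCOPY   0x00020000UL      /* illustrative bit values */
    #define VM_WIPEONFORK 0x02000000UL

    enum fork_copy_action {
        FORK_SKIP_VMA,                      /* VM_DONTCOPY: child gets no VMA */
        FORK_COPY_EMPTY,                    /* VM_WIPEONFORK: VMA kept, contents wiped */
        FORK_COPY_PTES,                     /* default: duplicate the page tables */
    };

    static enum fork_copy_action fork_action_sketch(vm_flags_t vm_flags)
    {
        if (vm_flags & VM_DONTCOPY)
            return FORK_SKIP_VMA;
        if (vm_flags & VM_WIPEONFORK)
            return FORK_COPY_EMPTY;
        return FORK_COPY_PTES;
    }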