Searched refs:vmf (Results 1 – 25 of 135) sorted by relevance
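
Throughout these results, vmf is the conventional name for a struct vm_fault *, the per-fault descriptor the core page-fault path hands to every vm_operations_struct callback. It carries the faulting VMA (vmf->vma), the faulting address and file offset (vmf->address, vmf->pgoff), and the fault flags (vmf->flags). For orientation, a minimal sketch of such a handler follows; the example_priv type and its pages[] array are illustrative assumptions, not taken from any file listed here.

#include <linux/mm.h>

struct example_priv {			/* hypothetical per-mapping state */
	struct page **pages;
	unsigned long nr_pages;
};

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct example_priv *priv = vmf->vma->vm_private_data;

	if (vmf->pgoff >= priv->nr_pages)
		return VM_FAULT_SIGBUS;	/* fault beyond the backing store */

	vmf->page = priv->pages[vmf->pgoff];
	get_page(vmf->page);		/* the core expects a held reference */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};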

/linux/mm/
memory.c
97 static vm_fault_t do_fault(struct vm_fault *vmf);
98 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
99 static bool vmf_pte_changed(struct vm_fault *vmf);
105 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) in vmf_orig_pte_uffd_wp() argument
107 if (!userfaultfd_wp(vmf->vma)) in vmf_orig_pte_uffd_wp()
109 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in vmf_orig_pte_uffd_wp()
112 return pte_is_uffd_wp_marker(vmf->orig_pte); in vmf_orig_pte_uffd_wp()
2762 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, in vmf_insert_page_mkwrite() argument
2765 pgprot_t pgprot = vmf->vma->vm_page_prot; in vmf_insert_page_mkwrite()
2766 unsigned long addr = vmf->address; in vmf_insert_page_mkwrite()
[all …]
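
The vmf_orig_pte_uffd_wp() matches above illustrate a general rule: vmf->orig_pte is only meaningful when the fault path set FAULT_FLAG_ORIG_PTE_VALID, so helpers must test that flag before inspecting the saved PTE. A hedged sketch of the same guard (the helper name is hypothetical):

#include <linux/mm.h>

static bool example_orig_pte_was_writable(struct vm_fault *vmf)
{
	/* vmf->orig_pte is stale unless the core marked it valid. */
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_write(vmf->orig_pte);
}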
huge_memory.c
1320 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf) in __do_huge_pmd_anonymous_page() argument
1322 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
1323 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page()
1328 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); in __do_huge_pmd_anonymous_page()
1338 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1339 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
1348 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
1351 ret = handle_userfault(vmf, VM_UFFD_MISSING); in __do_huge_pmd_anonymous_page()
1355 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
1356 map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
[all …]
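
__do_huge_pmd_anonymous_page() above shows the standard race check for huge faults: allocate the folio without the lock, then take the PMD lock and confirm the entry is still empty before mapping it. A reduced sketch of that shape, with allocation and mapping elided:

#include <linux/mm.h>

static vm_fault_t example_huge_pmd_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	/* ... allocate the huge folio while unlocked ... */

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		/* Another thread populated the PMD while we allocated. */
		spin_unlock(vmf->ptl);
		return 0;	/* drop our folio; the mapping already exists */
	}

	/* ... map the folio and deposit the preallocated page table ... */
	spin_unlock(vmf->ptl);
	return 0;
}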
hugetlb.c
4794 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) in hugetlb_vm_op_fault() argument
5445 static vm_fault_t hugetlb_wp(struct vm_fault *vmf) in hugetlb_wp() argument
5447 struct vm_area_struct *vma = vmf->vma; in hugetlb_wp()
5449 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in hugetlb_wp()
5450 pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte); in hugetlb_wp()
5471 set_huge_ptep_writable(vma, vmf->address, vmf->pte); in hugetlb_wp()
5497 set_huge_ptep_maybe_writable(vma, vmf->address, in hugetlb_wp()
5498 vmf->pte); in hugetlb_wp()
5526 spin_unlock(vmf->ptl); in hugetlb_wp()
5527 new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); in hugetlb_wp()
[all …]
filemap.c
1748 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) in __folio_lock_or_retry() argument
1750 unsigned int flags = vmf->flags; in __folio_lock_or_retry()
1760 release_fault_lock(vmf); in __folio_lock_or_retry()
1772 release_fault_lock(vmf); in __folio_lock_or_retry()
3259 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3270 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3273 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_folio_maybe_drop_mmap()
3274 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3284 release_fault_lock(vmf); in lock_folio_maybe_drop_mmap()
3300 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
[all …]
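
__folio_lock_or_retry() and lock_folio_maybe_drop_mmap() above implement the fault retry protocol: on a first attempt the handler may drop the mmap (or per-VMA) lock, wait for I/O, and return VM_FAULT_RETRY so the core re-drives the fault; a repeated attempt must finish without dropping the lock. A compressed sketch of that control flow (the handler itself is hypothetical):

#include <linux/mm.h>

static vm_fault_t example_fault_or_retry(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;	/* caller refuses to sleep */

		release_fault_lock(vmf);	/* drop the fault lock for I/O */
		/* ... sleep until the folio is ready ... */
		return VM_FAULT_RETRY;		/* core retakes locks, retries */
	}

	/* Retried attempt: must complete while holding the fault lock. */
	return 0;
}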
secretmem.c
50 static vm_fault_t secretmem_fault(struct vm_fault *vmf) in secretmem_fault() argument
52 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in secretmem_fault()
53 struct inode *inode = file_inode(vmf->vma->vm_file); in secretmem_fault()
54 pgoff_t offset = vmf->pgoff; in secretmem_fault()
55 gfp_t gfp = vmf->gfp_mask; in secretmem_fault()
61 if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode)) in secretmem_fault()
103 vmf->page = folio_file_page(folio, vmf->pgoff); in secretmem_fault()
swap_state.c
672 static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start, in swap_vma_ra_win() argument
675 struct vm_area_struct *vma = vmf->vma; in swap_vma_ra_win()
684 faddr = vmf->address; in swap_vma_ra_win()
727 struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) in swap_vma_readahead() argument
738 win = swap_vma_ra_win(vmf, &start, &end); in swap_vma_readahead()
742 ilx = targ_ilx - PFN_DOWN(vmf->address - start); in swap_vma_readahead()
750 pte = pte_offset_map(vmf->pmd, addr); in swap_vma_readahead()
778 if (addr != vmf->address) { in swap_vma_readahead()
812 struct vm_fault *vmf) in swapin_readahead() argument
818 mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx); in swapin_readahead()
[all …]
/linux/drivers/dax/
device.c
83 static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn, in dax_set_mapping() argument
87 struct file *filp = vmf->vma->vm_file; in dax_set_mapping()
95 pgoff = linear_page_index(vmf->vma, in dax_set_mapping()
96 ALIGN_DOWN(vmf->address, fault_size)); in dax_set_mapping()
110 struct vm_fault *vmf) in __dev_dax_pte_fault() argument
117 if (check_vma(dev_dax, vmf->vma, __func__)) in __dev_dax_pte_fault()
129 phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE); in __dev_dax_pte_fault()
131 dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff); in __dev_dax_pte_fault()
137 dax_set_mapping(vmf, pfn, fault_size); in __dev_dax_pte_fault()
139 return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), in __dev_dax_pte_fault()
[all …]
/linux/include/trace/events/
fs_dax.h
11 TP_PROTO(struct inode *inode, struct vm_fault *vmf,
13 TP_ARGS(inode, vmf, max_pgoff, result),
29 __entry->vm_start = vmf->vma->vm_start;
30 __entry->vm_end = vmf->vma->vm_end;
31 __entry->vm_flags = vmf->vma->vm_flags;
32 __entry->address = vmf->address;
33 __entry->flags = vmf->flags;
34 __entry->pgoff = vmf->pgoff;
56 TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
58 TP_ARGS(inode, vmf, max_pgoff, result))
[all …]
/linux/fs/
dax.c
1003 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) in copy_cow_page_dax() argument
1017 vto = kmap_atomic(vmf->cow_page); in copy_cow_page_dax()
1018 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); in copy_cow_page_dax()
1042 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
1046 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry()
1049 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); in dax_insert_entry()
1071 dax_associate_entry(new_entry, mapping, vmf->vma, in dax_insert_entry()
1072 vmf->address, shared); in dax_insert_entry()
1358 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1362 unsigned long vaddr = vmf->address; in dax_load_hole()
[all …]
userfaultfd.c
231 struct vm_fault *vmf, in userfaultfd_huge_must_wait() argument
234 struct vm_area_struct *vma = vmf->vma; in userfaultfd_huge_must_wait()
237 assert_fault_locked(vmf); in userfaultfd_huge_must_wait()
239 ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma)); in userfaultfd_huge_must_wait()
243 pte = huge_ptep_get(vma->vm_mm, vmf->address, ptep); in userfaultfd_huge_must_wait()
267 struct vm_fault *vmf, in userfaultfd_huge_must_wait() argument
284 struct vm_fault *vmf, in userfaultfd_must_wait() argument
288 unsigned long address = vmf->address; in userfaultfd_must_wait()
297 assert_fault_locked(vmf); in userfaultfd_must_wait()
381 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) in handle_userfault() argument
[all …]
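
handle_userfault() above is the destination of the handle_userfault(vmf, VM_UFFD_MISSING) call seen in huge_memory.c: when a VMA is registered with userfaultfd, fault handlers delegate missing pages to the userspace monitor instead of filling them in-kernel. A minimal sketch of that hand-off:

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static vm_fault_t example_anon_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	if (userfaultfd_missing(vma)) {
		/* Wake the monitor; it resolves the fault via UFFDIO_COPY. */
		return handle_userfault(vmf, VM_UFFD_MISSING);
	}

	/* ... otherwise allocate and map an anonymous page here ... */
	return 0;
}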
/linux/drivers/gpu/drm/ttm/
ttm_bo_vm.c
44 struct vm_fault *vmf) in ttm_bo_vm_fault_idle() argument
59 if (fault_flag_allow_retry_first(vmf->flags)) { in ttm_bo_vm_fault_idle()
60 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in ttm_bo_vm_fault_idle()
64 mmap_read_unlock(vmf->vma->vm_mm); in ttm_bo_vm_fault_idle()
119 struct vm_fault *vmf) in ttm_bo_vm_reserve() argument
133 if (fault_flag_allow_retry_first(vmf->flags)) { in ttm_bo_vm_reserve()
134 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { in ttm_bo_vm_reserve()
136 mmap_read_unlock(vmf->vma->vm_mm); in ttm_bo_vm_reserve()
183 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, in ttm_bo_vm_fault_reserved() argument
187 struct vm_area_struct *vma = vmf->vma; in ttm_bo_vm_fault_reserved()
[all …]
/linux/fs/ocfs2/
mmap.c
31 static vm_fault_t ocfs2_fault(struct vm_fault *vmf) in ocfs2_fault() argument
33 struct vm_area_struct *vma = vmf->vma; in ocfs2_fault()
38 ret = filemap_fault(vmf); in ocfs2_fault()
42 vma, vmf->page, vmf->pgoff); in ocfs2_fault()
113 static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) in ocfs2_page_mkwrite() argument
115 struct folio *folio = page_folio(vmf->page); in ocfs2_page_mkwrite()
116 struct inode *inode = file_inode(vmf->vma->vm_file); in ocfs2_page_mkwrite()
144 ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio); in ocfs2_page_mkwrite()
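
ocfs2_page_mkwrite() (like nilfs_page_mkwrite() further down) follows the standard .page_mkwrite contract: the core has already mapped the page read-only, and the handler makes it durably writable, returning VM_FAULT_LOCKED with the folio still locked. A generic sketch of that contract, minus the filesystem-specific allocation:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	folio_lock(folio);
	if (folio->mapping != inode->i_mapping) {
		/* Truncated or reclaimed while we faulted: retry cleanly. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	/* ... allocate blocks / reserve space for the write here ... */

	folio_mark_dirty(folio);
	return VM_FAULT_LOCKED;		/* folio stays locked for the caller */
}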
/linux/arch/x86/entry/vdso/
vma.c
53 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
57 if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) in vdso_fault()
60 vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); in vdso_fault()
61 get_page(vmf->page); in vdso_fault()
92 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_vclock_fault() argument
94 switch (vmf->pgoff) { in vvar_vclock_fault()
102 return vmf_insert_pfn_prot(vma, vmf->address, in vvar_vclock_fault()
113 return vmf_insert_pfn(vma, vmf->address, pfn); in vvar_vclock_fault()
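
The vDSO handlers above (and the ocxl ones below) show the PFN-based alternative: where no struct page is handed back, the handler installs the PTE itself with vmf_insert_pfn() and returns its vm_fault_t directly. A sketch, with the base PFN as a hypothetical stand-in:

#include <linux/mm.h>

static unsigned long example_base_pfn;	/* hypothetical device PFN base */

static vm_fault_t example_pfn_fault(struct vm_fault *vmf)
{
	/* No struct page involved: install the PTE, report the result. */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      example_base_pfn + vmf->pgoff);
}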
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
394 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) in vmw_bo_vm_mkwrite() argument
396 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_mkwrite()
408 save_flags = vmf->flags; in vmw_bo_vm_mkwrite()
409 vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY; in vmw_bo_vm_mkwrite()
410 ret = ttm_bo_vm_reserve(bo, vmf); in vmw_bo_vm_mkwrite()
411 vmf->flags = save_flags; in vmw_bo_vm_mkwrite()
415 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_mkwrite()
435 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) in vmw_bo_vm_fault() argument
437 struct vm_area_struct *vma = vmf->vma; in vmw_bo_vm_fault()
445 ret = ttm_bo_vm_reserve(bo, vmf); in vmw_bo_vm_fault()
[all …]
/linux/lib/vdso/
datastore.c
41 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_fault() argument
47 switch (vmf->pgoff) { in vvar_fault()
57 addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE; in vvar_fault()
85 vmf->pgoff - VDSO_ARCH_PAGES_START; in vvar_fault()
91 return vmf_insert_pfn(vma, vmf->address, pfn); in vvar_fault()
/linux/drivers/xen/
privcmd-buf.c
118 static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf) in privcmd_buf_vma_fault() argument
121 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end, in privcmd_buf_vma_fault()
122 vmf->pgoff, (void *)vmf->address); in privcmd_buf_vma_fault()
/linux/drivers/hv/
mshv_vtl_main.c
792 static vm_fault_t mshv_vtl_fault(struct vm_fault *vmf) in mshv_vtl_fault() argument
795 int cpu = vmf->pgoff & MSHV_PG_OFF_CPU_MASK; in mshv_vtl_fault()
796 int real_off = vmf->pgoff >> MSHV_REAL_OFF_SHIFT; in mshv_vtl_fault()
816 vmf->page = page; in mshv_vtl_fault()
1214 static bool can_fault(struct vm_fault *vmf, unsigned long size, unsigned long *pfn) in can_fault() argument
1217 unsigned long start = vmf->address & ~mask; in can_fault()
1221 is_valid = (vmf->address & mask) == ((vmf->pgoff << PAGE_SHIFT) & mask) && in can_fault()
1222 start >= vmf->vma->vm_start && in can_fault()
1223 end <= vmf->vma->vm_end; in can_fault()
1226 *pfn = vmf->pgoff & ~(mask >> PAGE_SHIFT); in can_fault()
[all …]
/linux/include/linux/
huge_mm.h
10 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
14 bool huge_pmd_set_accessed(struct vm_fault *vmf);
20 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
22 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) in huge_pud_set_accessed() argument
27 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
40 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
42 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
44 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
46 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
521 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
[all …]
/linux/fs/nilfs2/
file.c
45 static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf) in nilfs_page_mkwrite() argument
47 struct vm_area_struct *vma = vmf->vma; in nilfs_page_mkwrite()
48 struct folio *folio = page_folio(vmf->page); in nilfs_page_mkwrite()
101 ret = block_page_mkwrite(vma, vmf, nilfs_get_block); in nilfs_page_mkwrite()
/linux/drivers/misc/ocxl/
sysfs.c
109 static vm_fault_t global_mmio_fault(struct vm_fault *vmf) in global_mmio_fault() argument
111 struct vm_area_struct *vma = vmf->vma; in global_mmio_fault()
115 if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT)) in global_mmio_fault()
118 offset = vmf->pgoff; in global_mmio_fault()
120 return vmf_insert_pfn(vma, vmf->address, offset); in global_mmio_fault()
context.c
139 static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf) in ocxl_mmap_fault() argument
141 struct vm_area_struct *vma = vmf->vma; in ocxl_mmap_fault()
146 offset = vmf->pgoff << PAGE_SHIFT; in ocxl_mmap_fault()
148 ctx->pasid, vmf->address, offset); in ocxl_mmap_fault()
151 ret = map_pp_mmio(vma, vmf->address, offset, ctx); in ocxl_mmap_fault()
153 ret = map_afu_irq(vma, vmf->address, offset, ctx); in ocxl_mmap_fault()
/linux/fs/ext2/
file.c
93 static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) in ext2_dax_fault() argument
95 struct inode *inode = file_inode(vmf->vma->vm_file); in ext2_dax_fault()
97 bool write = (vmf->flags & FAULT_FLAG_WRITE) && in ext2_dax_fault()
98 (vmf->vma->vm_flags & VM_SHARED); in ext2_dax_fault()
102 file_update_time(vmf->vma->vm_file); in ext2_dax_fault()
106 ret = dax_iomap_fault(vmf, 0, NULL, NULL, &ext2_iomap_ops); in ext2_dax_fault()
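
ext2_dax_fault() above (and ext4_dax_huge_fault() below) share the write-fault bookkeeping pattern for DAX: a shared writable fault updates the file timestamps and holds the superblock's page-fault freeze protection across dax_iomap_fault(). A sketch, with example_iomap_ops standing in for the filesystem's iomap operations:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

static const struct iomap_ops example_iomap_ops;	/* hypothetical */

static vm_fault_t example_dax_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	vm_fault_t ret;

	if (write) {
		sb_start_pagefault(inode->i_sb);	/* hold off freezing */
		file_update_time(vmf->vma->vm_file);
	}

	ret = dax_iomap_fault(vmf, 0, NULL, NULL, &example_iomap_ops);

	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}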
/linux/drivers/char/agp/
alpha-agp.c
14 static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf) in alpha_core_agp_vm_fault() argument
21 dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base; in alpha_core_agp_vm_fault()
32 vmf->page = page; in alpha_core_agp_vm_fault()
/linux/drivers/gpu/drm/gma500/
fbdev.c
22 static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf) in psb_fbdev_vm_fault() argument
24 struct vm_area_struct *vma = vmf->vma; in psb_fbdev_vm_fault()
26 unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT); in psb_fbdev_vm_fault()
/linux/fs/ext4/
file.c
727 static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) in ext4_dax_huge_fault() argument
733 struct inode *inode = file_inode(vmf->vma->vm_file); in ext4_dax_huge_fault()
747 bool write = (vmf->flags & FAULT_FLAG_WRITE) && in ext4_dax_huge_fault()
748 (vmf->vma->vm_flags & VM_SHARED); in ext4_dax_huge_fault()
749 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in ext4_dax_huge_fault()
754 file_update_time(vmf->vma->vm_file); in ext4_dax_huge_fault()
767 result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops); in ext4_dax_huge_fault()
776 result = dax_finish_sync_fault(vmf, order, pfn); in ext4_dax_huge_fault()
786 static vm_fault_t ext4_dax_fault(struct vm_fault *vmf) in ext4_dax_fault() argument
788 return ext4_dax_huge_fault(vmf, 0); in ext4_dax_fault()
