/linux/mm/ |
H A D | memory.c |
  106  static vm_fault_t do_fault(struct vm_fault *vmf);
  107  static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
  108  static bool vmf_pte_changed(struct vm_fault *vmf);
  114  static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)    in vmf_orig_pte_uffd_wp() argument
  116  if (!userfaultfd_wp(vmf->vma))    in vmf_orig_pte_uffd_wp()
  118  if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))    in vmf_orig_pte_uffd_wp()
  121  return pte_marker_uffd_wp(vmf->orig_pte);    in vmf_orig_pte_uffd_wp()
 3107  static inline int pte_unmap_same(struct vm_fault *vmf)    in pte_unmap_same() argument
 3112  spin_lock(vmf->ptl);    in pte_unmap_same()
 3113  same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);    in pte_unmap_same()
 [all …]
|
H A D | huge_memory.c |
 1218  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)    in __do_huge_pmd_anonymous_page() argument
 1220  unsigned long haddr = vmf->address & HPAGE_PMD_MASK;    in __do_huge_pmd_anonymous_page()
 1221  struct vm_area_struct *vma = vmf->vma;    in __do_huge_pmd_anonymous_page()
 1226  folio = vma_alloc_anon_folio_pmd(vma, vmf->address);    in __do_huge_pmd_anonymous_page()
 1236  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);    in __do_huge_pmd_anonymous_page()
 1237  if (unlikely(!pmd_none(*vmf->pmd))) {    in __do_huge_pmd_anonymous_page()
 1246  spin_unlock(vmf->ptl);    in __do_huge_pmd_anonymous_page()
 1249  ret = handle_userfault(vmf, VM_UFFD_MISSING);    in __do_huge_pmd_anonymous_page()
 1253  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);    in __do_huge_pmd_anonymous_page()
 1254  map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);    in __do_huge_pmd_anonymous_page()
 [all …]
|
H A D | filemap.c |
 1714  vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)    in __folio_lock_or_retry() argument
 1716  unsigned int flags = vmf->flags;    in __folio_lock_or_retry()
 1726  release_fault_lock(vmf);    in __folio_lock_or_retry()
 1738  release_fault_lock(vmf);    in __folio_lock_or_retry()
 3150  static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,    in lock_folio_maybe_drop_mmap() argument
 3161  if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)    in lock_folio_maybe_drop_mmap()
 3164  *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);    in lock_folio_maybe_drop_mmap()
 3165  if (vmf->flags & FAULT_FLAG_KILLABLE) {    in lock_folio_maybe_drop_mmap()
 3175  release_fault_lock(vmf);    in lock_folio_maybe_drop_mmap()
 3191  static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)    in do_sync_mmap_readahead() argument
 [all …]
|
H A D | hugetlb.c |
 5152  static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)    in hugetlb_vm_op_fault() argument
 5830  struct vm_fault *vmf)    in hugetlb_wp() argument
 5832  struct vm_area_struct *vma = vmf->vma;    in hugetlb_wp()
 5834  const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;    in hugetlb_wp()
 5835  pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);    in hugetlb_wp()
 5856  set_huge_ptep_writable(vma, vmf->address, vmf->pte);    in hugetlb_wp()
 5882  set_huge_ptep_maybe_writable(vma, vmf->address,    in hugetlb_wp()
 5883  vmf->pte);    in hugetlb_wp()
 5910  spin_unlock(vmf->ptl);    in hugetlb_wp()
 5911  new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);    in hugetlb_wp()
 [all …]
|
/linux/include/trace/events/ |
H A D | fs_dax.h |
   11  TP_PROTO(struct inode *inode, struct vm_fault *vmf,
   13  TP_ARGS(inode, vmf, max_pgoff, result),
   29  __entry->vm_start = vmf->vma->vm_start;
   30  __entry->vm_end = vmf->vma->vm_end;
   31  __entry->vm_flags = vmf->vma->vm_flags;
   32  __entry->address = vmf->address;
   33  __entry->flags = vmf->flags;
   34  __entry->pgoff = vmf->pgoff;
   56  TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
   58  TP_ARGS(inode, vmf, max_pgoff, …
 [all …]
/linux/drivers/dax/ |
H A D | device.c |
   76  static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,    in dax_set_mapping() argument
   80  struct file *filp = vmf->vma->vm_file;    in dax_set_mapping()
   88  pgoff = linear_page_index(vmf->vma,    in dax_set_mapping()
   89  ALIGN_DOWN(vmf->address, fault_size));    in dax_set_mapping()
  104  struct vm_fault *vmf)    in __dev_dax_pte_fault() argument
  111  if (check_vma(dev_dax, vmf->vma, __func__))    in __dev_dax_pte_fault()
  123  phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);    in __dev_dax_pte_fault()
  125  dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);    in __dev_dax_pte_fault()
  131  dax_set_mapping(vmf, pfn, fault_size);    in __dev_dax_pte_fault()
  133  return vmf_insert_mixed(vmf->vma, vmf->address, pfn);    in __dev_dax_pte_fault()
 [all …]
|
/linux/fs/ |
H A D | dax.c |
  830  static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)    in copy_cow_page_dax() argument
  844  vto = kmap_atomic(vmf->cow_page);    in copy_cow_page_dax()
  845  copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);    in copy_cow_page_dax()
  869  static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,    in dax_insert_entry() argument
  873  struct address_space *mapping = vmf->vma->vm_file->f_mapping;    in dax_insert_entry()
  876  bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);    in dax_insert_entry()
  898  dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,    in dax_insert_entry()
 1186  static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,    in dax_load_hole() argument
 1190  unsigned long vaddr = vmf->address;    in dax_load_hole()
 1194  *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);    in dax_load_hole()
 [all …]
|
H A D | userfaultfd.c |
  231  struct vm_fault *vmf,    in userfaultfd_huge_must_wait() argument
  234  struct vm_area_struct *vma = vmf->vma;    in userfaultfd_huge_must_wait()
  238  assert_fault_locked(vmf);    in userfaultfd_huge_must_wait()
  240  ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));    in userfaultfd_huge_must_wait()
  245  pte = huge_ptep_get(vma->vm_mm, vmf->address, ptep);    in userfaultfd_huge_must_wait()
  261  struct vm_fault *vmf,    in userfaultfd_huge_must_wait() argument
  276  struct vm_fault *vmf,    in userfaultfd_must_wait() argument
  280  unsigned long address = vmf->address;    in userfaultfd_must_wait()
  289  assert_fault_locked(vmf);    in userfaultfd_must_wait()
  363  vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)    in handle_userfault() argument
 [all …]
/linux/drivers/gpu/drm/ttm/ |
H A D | ttm_bo_vm.c |
   42  struct vm_fault *vmf)    in ttm_bo_vm_fault_idle() argument
   57  if (fault_flag_allow_retry_first(vmf->flags)) {    in ttm_bo_vm_fault_idle()
   58  if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)    in ttm_bo_vm_fault_idle()
   62  mmap_read_unlock(vmf->vma->vm_mm);    in ttm_bo_vm_fault_idle()
  117  struct vm_fault *vmf)    in ttm_bo_vm_reserve() argument
  131  if (fault_flag_allow_retry_first(vmf->flags)) {    in ttm_bo_vm_reserve()
  132  if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {    in ttm_bo_vm_reserve()
  134  mmap_read_unlock(vmf->vma->vm_mm);    in ttm_bo_vm_reserve()
  181  vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,    in ttm_bo_vm_fault_reserved() argument
  185  struct vm_area_struct *vma = vmf->vma;    in ttm_bo_vm_fault_reserved()
 [all …]
|
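The ttm_bo_vm.c matches above show the fault-retry protocol: when a handler cannot make progress without sleeping, it may drop mmap_lock and return VM_FAULT_RETRY, but only when the fault flags permit it. Below is a minimal sketch of that protocol under stated assumptions; struct my_retry_dev and the my_retry_dev_*() helpers are hypothetical stand-ins for driver state, and this is not TTM's actual code.

#include <linux/mm.h>

/* Hypothetical driver state and helpers, declared only for this sketch. */
struct my_retry_dev;
bool my_retry_dev_ready(struct my_retry_dev *dev);
void my_retry_dev_wait_ready(struct my_retry_dev *dev);
vm_fault_t my_retry_dev_map(struct my_retry_dev *dev, struct vm_fault *vmf);

static vm_fault_t my_fault_with_retry(struct vm_fault *vmf)
{
	struct my_retry_dev *dev = vmf->vma->vm_private_data;

	if (!my_retry_dev_ready(dev)) {
		/* First attempt with FAULT_FLAG_ALLOW_RETRY: we may drop mmap_lock. */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
				return VM_FAULT_RETRY;	/* caller forbids sleeping here */
			mmap_read_unlock(vmf->vma->vm_mm);
			my_retry_dev_wait_ready(dev);	/* hypothetical sleep */
			/* The core MM retakes mmap_lock and re-issues the fault. */
			return VM_FAULT_RETRY;
		}
		/* Retry already used up: wait with mmap_lock still held. */
		my_retry_dev_wait_ready(dev);
	}
	return my_retry_dev_map(dev, vmf);	/* hypothetical: installs the PTE */
}

A real driver also takes a reference on the object it is about to wait on before dropping mmap_lock, as the TTM code does, because the VMA and the object behind it can otherwise go away while the handler sleeps.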
/linux/fs/ocfs2/ |
H A D | mmap.c |
   31  static vm_fault_t ocfs2_fault(struct vm_fault *vmf)    in ocfs2_fault() argument
   33  struct vm_area_struct *vma = vmf->vma;    in ocfs2_fault()
   38  ret = filemap_fault(vmf);    in ocfs2_fault()
   42  vma, vmf->page, vmf->pgoff);    in ocfs2_fault()
  113  static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)    in ocfs2_page_mkwrite() argument
  115  struct folio *folio = page_folio(vmf->page);    in ocfs2_page_mkwrite()
  116  struct inode *inode = file_inode(vmf->vma->vm_file);    in ocfs2_page_mkwrite()
  144  ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio);    in ocfs2_page_mkwrite()
|
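The ocfs2_page_mkwrite() hits above, like nilfs_page_mkwrite() further down, follow the usual .page_mkwrite shape: the page is already mapped read-only, and the handler must lock the folio, confirm it was not truncated away, and dirty it before write access is granted. A generic sketch of that shape, assuming nothing filesystem-specific and not reproducing ocfs2's real implementation:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Generic .page_mkwrite sketch; a composite of the common pattern, not ocfs2. */
static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	sb_start_pagefault(inode->i_sb);	/* pairs with sb_end_pagefault() */
	folio_lock(folio);
	if (folio->mapping != inode->i_mapping) {
		/* Truncated or reclaimed under us: drop everything and retry. */
		folio_unlock(folio);
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_NOPAGE;
	}
	folio_mark_dirty(folio);
	folio_wait_stable(folio);		/* wait out in-flight writeback */
	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;			/* folio is returned still locked */
}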
/linux/drivers/video/fbdev/core/ |
H A D | fb_defio.c |
  129  static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)    in fb_deferred_io_fault() argument
  133  struct fb_info *info = vmf->vma->vm_private_data;    in fb_deferred_io_fault()
  135  offset = vmf->pgoff << PAGE_SHIFT;    in fb_deferred_io_fault()
  143  if (vmf->vma->vm_file)    in fb_deferred_io_fault()
  144  page->mapping = vmf->vma->vm_file->f_mapping;    in fb_deferred_io_fault()
  149  page->index = vmf->pgoff; /* for folio_mkclean() */    in fb_deferred_io_fault()
  151  vmf->page = page;    in fb_deferred_io_fault()
  230  static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)    in fb_deferred_io_page_mkwrite() argument
  232  unsigned long offset = vmf->pgoff << PAGE_SHIFT;    in fb_deferred_io_page_mkwrite()
  233  struct page *page = vmf->page;    in fb_deferred_io_page_mkwrite()
 [all …]
|
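fb_deferred_io_fault() above illustrates the simplest fault-handler contract: translate vmf->pgoff into a struct page, take a reference, store it in vmf->page and return 0, after which the core MM installs the PTE. A hedged sketch of that contract for a hypothetical physically contiguous driver buffer (struct my_dev and its shared_mem fields are made up for the example):

#include <linux/mm.h>

/* Hypothetical driver state, defined only for this sketch. */
struct my_dev {
	void *shared_mem;		/* kmalloc'ed, physically contiguous */
	size_t shared_mem_size;
};

static vm_fault_t my_buffer_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (offset >= dev->shared_mem_size)
		return VM_FAULT_SIGBUS;		/* fault past the backing buffer */

	page = virt_to_page((char *)dev->shared_mem + offset);
	get_page(page);				/* the core MM drops this reference */
	vmf->page = page;
	return 0;				/* 0 means "here is the page, map it" */
}

static const struct vm_operations_struct my_buffer_vm_ops = {
	.fault = my_buffer_fault,
};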
/linux/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_page_dirty.c |
  375  vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)    in vmw_bo_vm_mkwrite() argument
  377  struct vm_area_struct *vma = vmf->vma;    in vmw_bo_vm_mkwrite()
  389  save_flags = vmf->flags;    in vmw_bo_vm_mkwrite()
  390  vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;    in vmw_bo_vm_mkwrite()
  391  ret = ttm_bo_vm_reserve(bo, vmf);    in vmw_bo_vm_mkwrite()
  392  vmf->flags = save_flags;    in vmw_bo_vm_mkwrite()
  396  page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);    in vmw_bo_vm_mkwrite()
  416  vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)    in vmw_bo_vm_fault() argument
  418  struct vm_area_struct *vma = vmf->vma;    in vmw_bo_vm_fault()
  426  ret = ttm_bo_vm_reserve(bo, vmf);    in vmw_bo_vm_fault()
 [all …]
|
/linux/fs/xfs/ |
H A D | xfs_file.c |
 1424  struct vm_fault *vmf,    in xfs_dax_fault_locked() argument
 1435  ret = dax_iomap_fault(vmf, order, &pfn, NULL,    in xfs_dax_fault_locked()
 1436  (write_fault && !vmf->cow_page) ?    in xfs_dax_fault_locked()
 1440  ret = dax_finish_sync_fault(vmf, order, pfn);    in xfs_dax_fault_locked()
 1446  struct vm_fault *vmf,    in xfs_dax_read_fault() argument
 1449  struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file));    in xfs_dax_read_fault()
 1454  ret = filemap_fsnotify_fault(vmf);    in xfs_dax_read_fault()
 1458  ret = xfs_dax_fault_locked(vmf, order, false);    in xfs_dax_read_fault()
 1476  struct vm_fault *vmf,    in xfs_write_fault() argument
 1479  struct inode *inode = file_inode(vmf->vma->vm_file);    in xfs_write_fault()
 [all …]
|
/linux/drivers/xen/ |
H A D | privcmd-buf.c |
  118  static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)    in privcmd_buf_vma_fault() argument
  121  vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,    in privcmd_buf_vma_fault()
  122  vmf->pgoff, (void *)vmf->address);    in privcmd_buf_vma_fault()
|
/linux/fs/nilfs2/ |
H A D | file.c |
   45  static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)    in nilfs_page_mkwrite() argument
   47  struct vm_area_struct *vma = vmf->vma;    in nilfs_page_mkwrite()
   48  struct folio *folio = page_folio(vmf->page);    in nilfs_page_mkwrite()
  101  ret = block_page_mkwrite(vma, vmf, nilfs_get_block);    in nilfs_page_mkwrite()
|
/linux/drivers/misc/cxl/ |
H A D | context.c |
  126  static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)    in cxl_mmap_fault() argument
  128  struct vm_area_struct *vma = vmf->vma;    in cxl_mmap_fault()
  133  offset = vmf->pgoff << PAGE_SHIFT;    in cxl_mmap_fault()
  136  __func__, ctx->pe, vmf->address, offset);    in cxl_mmap_fault()
  161  vmf->page = ctx->ff_page;    in cxl_mmap_fault()
  168  ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);    in cxl_mmap_fault()
|
/linux/drivers/misc/ocxl/ |
H A D | context.c |
  139  static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)    in ocxl_mmap_fault() argument
  141  struct vm_area_struct *vma = vmf->vma;    in ocxl_mmap_fault()
  146  offset = vmf->pgoff << PAGE_SHIFT;    in ocxl_mmap_fault()
  148  ctx->pasid, vmf->address, offset);    in ocxl_mmap_fault()
  151  ret = map_pp_mmio(vma, vmf->address, offset, ctx);    in ocxl_mmap_fault()
  153  ret = map_afu_irq(vma, vmf->address, offset, ctx);    in ocxl_mmap_fault()
|
/linux/arch/loongarch/kernel/ |
H A D | vdso.c |
   48  struct vm_area_struct *vma, struct vm_fault *vmf)    in vvar_fault() argument
   53  switch (vmf->pgoff) {    in vvar_fault()
   75  pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;    in vvar_fault()
   81  return vmf_insert_pfn(vma, vmf->address, pfn);    in vvar_fault()
|
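The vvar_fault() implementations above (LoongArch here, s390 further down) end the same way as the dax and gma500 handlers in this listing: vmf_insert_pfn() installs the PTE itself and returns a vm_fault_t such as VM_FAULT_NOPAGE, so there is no struct page to hand back. A minimal sketch of that style, assuming a hypothetical aperture described by base_pfn and nr_pages:

#include <linux/mm.h>

/* Hypothetical aperture description; not taken from the files above. */
struct my_aperture {
	unsigned long base_pfn;
	unsigned long nr_pages;
};

static vm_fault_t my_pfn_fault(struct vm_fault *vmf)
{
	struct my_aperture *ap = vmf->vma->vm_private_data;

	if (vmf->pgoff >= ap->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Assumes the mmap handler set the VMA up as a VM_PFNMAP mapping. */
	return vmf_insert_pfn(vmf->vma, vmf->address, ap->base_pfn + vmf->pgoff);
}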
/linux/fs/ext4/ |
H A D | file.c |
  721  static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)    in ext4_dax_huge_fault() argument
  727  struct inode *inode = file_inode(vmf->vma->vm_file);    in ext4_dax_huge_fault()
  741  bool write = (vmf->flags & FAULT_FLAG_WRITE) &&    in ext4_dax_huge_fault()
  742  (vmf->vma->vm_flags & VM_SHARED);    in ext4_dax_huge_fault()
  743  struct address_space *mapping = vmf->vma->vm_file->f_mapping;    in ext4_dax_huge_fault()
  748  file_update_time(vmf->vma->vm_file);    in ext4_dax_huge_fault()
  759  result = filemap_fsnotify_fault(vmf);    in ext4_dax_huge_fault()
  764  result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);    in ext4_dax_huge_fault()
  773  result = dax_finish_sync_fault(vmf, order, pfn);    in ext4_dax_huge_fault()
  783  static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)    in ext4_dax_fault() argument
 [all …]
|
/linux/drivers/char/agp/ |
H A D | alpha-agp.c |
   14  static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)    in alpha_core_agp_vm_fault() argument
   21  dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;    in alpha_core_agp_vm_fault()
   32  vmf->page = page;    in alpha_core_agp_vm_fault()
|
/linux/sound/usb/usx2y/ |
H A D | usX2Yhwdep.c |
   21  static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)    in snd_us428ctls_vm_fault() argument
   27  offset = vmf->pgoff << PAGE_SHIFT;    in snd_us428ctls_vm_fault()
   28  vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;    in snd_us428ctls_vm_fault()
   31  vmf->page = page;    in snd_us428ctls_vm_fault()
|
/linux/arch/s390/kernel/ |
H A D | vdso.c |
   67  struct vm_area_struct *vma, struct vm_fault *vmf)    in vvar_fault() argument
   73  switch (vmf->pgoff) {    in vvar_fault()
   81  addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;    in vvar_fault()
  105  return vmf_insert_pfn(vma, vmf->address, pfn);    in vvar_fault()
|
/linux/arch/x86/kernel/cpu/sgx/ |
H A D | virt.c |
   74  static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)    in sgx_vepc_fault() argument
   76  struct vm_area_struct *vma = vmf->vma;    in sgx_vepc_fault()
   81  ret = __sgx_vepc_fault(vepc, vma, vmf->address);    in sgx_vepc_fault()
   87  if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {    in sgx_vepc_fault()
|
/linux/kernel/bpf/ |
H A D | arena.c |
  264  static vm_fault_t arena_vm_fault(struct vm_fault *vmf)    in arena_vm_fault() argument
  266  struct bpf_map *map = vmf->vma->vm_file->private_data;    in arena_vm_fault()
  273  kaddr = kbase + (u32)(vmf->address);    in arena_vm_fault()
  285  ret = range_tree_clear(&arena->rt, vmf->pgoff, 1);    in arena_vm_fault()
  292  range_tree_set(&arena->rt, vmf->pgoff, 1);    in arena_vm_fault()
  298  range_tree_set(&arena->rt, vmf->pgoff, 1);    in arena_vm_fault()
  304  vmf->page = page;    in arena_vm_fault()
|
/linux/drivers/gpu/drm/gma500/ |
H A D | gem.c |
  109  static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
  254  static vm_fault_t psb_gem_fault(struct vm_fault *vmf)    in psb_gem_fault() argument
  256  struct vm_area_struct *vma = vmf->vma;    in psb_gem_fault()
  290  page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;    in psb_gem_fault()
  297  ret = vmf_insert_pfn(vma, vmf->address, pfn);    in psb_gem_fault()
|