Lines matching vmf in mm/huge_memory.c (Linux kernel)
1217 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1219 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1220 struct vm_area_struct *vma = vmf->vma;
1225 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1235 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1236 if (unlikely(!pmd_none(*vmf->pmd))) {
1245 spin_unlock(vmf->ptl);
1248 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1252 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1253 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
1256 spin_unlock(vmf->ptl);
1261 spin_unlock(vmf->ptl);
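
These matches trace the standard THP anonymous-fault shape: allocate a PMD-sized folio and a spare page table outside the lock, take the PMD lock, recheck that the PMD is still empty (another thread may have populated it while the allocation slept), divert to userspace if the VMA is registered for userfaultfd MISSING events, and only then deposit the page table and map the folio. A minimal sketch of that skeleton, assuming it sits in mm/huge_memory.c so the static helpers seen above are visible; error paths, counters, and memcg charging are simplified:

    /* Sketch only -- mirrors __do_huge_pmd_anonymous_page(), simplified. */
    static vm_fault_t thp_anon_fault_sketch(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        struct folio *folio;
        pgtable_t pgtable;

        /* Allocations happen before taking the lock; both may sleep. */
        folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
        if (!folio)
            return VM_FAULT_FALLBACK;
        pgtable = pte_alloc_one(vma->vm_mm);
        if (!pgtable) {
            folio_put(folio);
            return VM_FAULT_OOM;
        }

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {
            /* Lost the race: someone else mapped this range. */
            spin_unlock(vmf->ptl);
            pte_free(vma->vm_mm, pgtable);
            folio_put(folio);
            return 0;
        }
        if (userfaultfd_missing(vma)) {
            /* Hand the missing fault to userspace. */
            spin_unlock(vmf->ptl);
            pte_free(vma->vm_mm, pgtable);
            folio_put(folio);
            return handle_userfault(vmf, VM_UFFD_MISSING);
        }

        /* Commit: stash the page table for a later split, then map. */
        pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
        map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
        spin_unlock(vmf->ptl);
        return 0;
    }

Depositing a page table that is not needed yet looks wasteful, but it guarantees that a later split of this huge mapping cannot fail for lack of memory.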
1317 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1319 struct vm_area_struct *vma = vmf->vma;
1320 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1325 ret = vmf_anon_prepare(vmf);
1330 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1346 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1348 if (pmd_none(*vmf->pmd)) {
1351 spin_unlock(vmf->ptl);
1354 spin_unlock(vmf->ptl);
1356 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1360 haddr, vmf->pmd, zero_folio);
1361 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1362 spin_unlock(vmf->ptl);
1365 spin_unlock(vmf->ptl);
1371 return __do_huge_pmd_anonymous_page(vmf);
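
The caller adds two gates before any allocation: vmf_anon_prepare() makes sure the VMA has an anon_vma, and a read fault (no FAULT_FLAG_WRITE) can be satisfied with the shared huge zero folio instead of real memory. A sketch of that read-fault branch, with the zero-folio and page-table lookups passed in rather than shown; as with the previous sketch this assumes visibility of huge_memory.c's static helpers, and the real code also frees the unused pgtable when the PMD turned out non-empty:

    /* Sketch of the zero-folio branch of do_huge_pmd_anonymous_page(). */
    static vm_fault_t zero_folio_fault_sketch(struct vm_fault *vmf,
                                              pgtable_t pgtable,
                                              struct folio *zero_folio)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (pmd_none(*vmf->pmd)) {
            if (userfaultfd_missing(vma)) {
                spin_unlock(vmf->ptl);
                return handle_userfault(vmf, VM_UFFD_MISSING);
            }
            /* Map the shared, always-zero PMD-sized folio read-only. */
            set_huge_zero_folio(pgtable, vma->vm_mm, vma, haddr,
                                vmf->pmd, zero_folio);
            update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
        }
        spin_unlock(vmf->ptl);
        return 0;
    }

Write faults fall through to __do_huge_pmd_anonymous_page() at the end of the function.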
1436 * @vmf: Structure describing the fault
1444 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
1447 unsigned long addr = vmf->address & PMD_MASK;
1448 struct vm_area_struct *vma = vmf->vma;
1478 ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1479 error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write,
1489 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
1492 struct vm_area_struct *vma = vmf->vma;
1493 unsigned long addr = vmf->address & PMD_MASK;
1515 ptl = pmd_lock(mm, vmf->pmd);
1516 error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot,
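
vmf_insert_pfn_pmd() and vmf_insert_folio_pmd() are the entry points a driver's huge_fault handler can use to install a PMD-sized mapping: both round the faulting address down with PMD_MASK and install the entry under pmd_lock(). A sketch of a caller, in which mydrv_addr_to_pfn() is a hypothetical lookup, the huge_fault signature taking an order is assumed to match current kernels, and the write flag as the final argument of vmf_insert_pfn_pmd() is an assumption (the matches above truncate the parameter list):

    /* Hypothetical driver fault handler built on vmf_insert_pfn_pmd(). */
    static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf, unsigned int order)
    {
        unsigned long pfn;

        if (order != PMD_ORDER)
            return VM_FAULT_FALLBACK;   /* let the core fall back to PTEs */

        pfn = mydrv_addr_to_pfn(vmf->vma, vmf->address & PMD_MASK);
        return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
    }

The folio variant does the same job but takes a struct folio, letting the core account for the mapping.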
1575 * @vmf: Structure describing the fault
1583 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
1586 unsigned long addr = vmf->address & PUD_MASK;
1587 struct vm_area_struct *vma = vmf->vma;
1609 ptl = pud_lock(vma->vm_mm, vmf->pud);
1610 insert_pud(vma, addr, vmf->pud, fop, pgprot, write);
1619 * @vmf: Structure describing the fault
1625 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
1628 struct vm_area_struct *vma = vmf->vma;
1629 unsigned long addr = vmf->address & PUD_MASK;
1630 pud_t *pud = vmf->pud;
1645 insert_pud(vma, addr, vmf->pud, fop, vma->vm_page_prot, write);
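
The PUD-level pair mirrors the PMD-level pair one step up the page-table hierarchy: the address is rounded with PUD_MASK instead of PMD_MASK and the entry goes in under pud_lock() instead of pmd_lock(). One visible asymmetry in the matches: insert_pmd() returns an error that the PMD helpers propagate, while insert_pud() at lines 1610 and 1645 is called without checking a return value. Illustrative pairing (not kernel code):

    /*
     * PMD level                             PUD level
     * addr = vmf->address & PMD_MASK;       addr = vmf->address & PUD_MASK;
     * ptl  = pmd_lock(mm, vmf->pmd);        ptl  = pud_lock(mm, vmf->pud);
     * error = insert_pmd(vma, addr, ...);   insert_pud(vma, addr, ...);
     */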
1838 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1840 bool write = vmf->flags & FAULT_FLAG_WRITE;
1842 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1843 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1846 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1848 spin_unlock(vmf->ptl);
1852 void huge_pmd_set_accessed(struct vm_fault *vmf)
1854 bool write = vmf->flags & FAULT_FLAG_WRITE;
1856 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1857 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1860 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1863 spin_unlock(vmf->ptl);
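
Both set_accessed helpers handle a fault against an entry that is already present (typically on hardware that cannot set the accessed bit itself): take the lock, confirm the entry has not changed since the fault snapshot was taken (pud_same()/pmd_same()), and mark it accessed, plus dirty for a write. The PMD body, condensed with the goto inverted:

    /* Condensed huge_pmd_set_accessed(); the PUD version has the same shape. */
    vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
    if (likely(pmd_same(*vmf->pmd, vmf->orig_pmd)))
        touch_pmd(vmf->vma, vmf->address, vmf->pmd,
                  vmf->flags & FAULT_FLAG_WRITE);
    spin_unlock(vmf->ptl);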
1866 static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
1868 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1869 struct vm_area_struct *vma = vmf->vma;
1874 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1881 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1882 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
1887 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
1888 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
1893 spin_unlock(vmf->ptl);
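
do_huge_zero_wp_pmd() is the write-fault path for the zero folio mapped above: allocate a private PMD-sized folio, revalidate orig_pmd under the lock, and swap the mappings. The ordering matters, as the condensed core shows:

    /* Condensed core of do_huge_zero_wp_pmd(). */
    if (pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)) {
        /*
         * Clear and flush first: no CPU may keep a stale zero-page
         * translation once the writable private mapping goes live.
         */
        (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
        map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
    }
    spin_unlock(vmf->ptl);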
1898 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1900 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1901 struct vm_area_struct *vma = vmf->vma;
1904 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1905 pmd_t orig_pmd = vmf->orig_pmd;
1907 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1911 vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
1920 spin_lock(vmf->ptl);
1922 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1923 spin_unlock(vmf->ptl);
1937 spin_unlock(vmf->ptl);
1939 spin_lock(vmf->ptl);
1940 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1941 spin_unlock(vmf->ptl);
1973 spin_unlock(vmf->ptl);
1978 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1979 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1980 spin_unlock(vmf->ptl);
1986 spin_unlock(vmf->ptl);
1988 __split_huge_pmd(vma, vmf->pmd, vmf->address, false);
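
do_huge_pmd_wp_page() resolves a write (or unshare) fault against a present huge PMD with three outcomes visible in the matches: route huge-zero-page CoW to do_huge_zero_wp_pmd(); reuse the folio in place when it is exclusively owned, by upgrading the existing entry; or give up on PMD granularity, unlock, and __split_huge_pmd() so the retried fault does ordinary PTE-level copy-on-write. A condensed reuse path, assuming maybe_pmd_mkwrite() as the write-permission helper used elsewhere in this file:

    /* Condensed reuse path of do_huge_pmd_wp_page(). */
    pmd_t entry = pmd_mkyoung(orig_pmd);
    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    spin_unlock(vmf->ptl);
    return 0;

Splitting rather than copying keeps the fallback cheap: after the split, the retried fault copies a single base page instead of the whole huge page.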
2023 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
2025 struct vm_area_struct *vma = vmf->vma;
2027 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2034 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2035 old_pmd = pmdp_get(vmf->pmd);
2037 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
2038 spin_unlock(vmf->ptl);
2050 can_change_pmd_writable(vma, vmf->address, pmd))
2059 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
2068 spin_unlock(vmf->ptl);
2079 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2080 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
2081 spin_unlock(vmf->ptl);
2086 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
2090 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2091 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2092 spin_unlock(vmf->ptl);
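
do_huge_pmd_numa_page() services a NUMA hinting fault: automatic NUMA balancing had rewritten the PMD as prot-none, so the access traps and the kernel gets a chance to migrate the folio toward the faulting node. Under the lock it revalidates orig_pmd, asks numa_migrate_check() for a target node, and drops the lock to attempt migration; if no migration happens, it must put back a present entry. A condensed restore path, where writable comes from the earlier can_change_pmd_writable() test and pmd_mkwrite() taking the VMA is an assumption about the current signature:

    /* Condensed restore path: turn the prot-none PMD back into a present one. */
    pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
    pmd = pmd_mkyoung(pmd);
    if (writable)
        pmd = pmd_mkwrite(pmd, vma);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    spin_unlock(vmf->ptl);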