Lines matching refs:pmd

Each entry below gives the source line number, the matching source line, and the enclosing function; "argument" and "local" mark how pmd is bound in that function. Only lines containing "pmd" are listed, so the surrounding context of each function is elided.

1073 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)  in maybe_pmd_mkwrite()  argument
1076 pmd = pmd_mkwrite(pmd, vma); in maybe_pmd_mkwrite()
1077 return pmd; in maybe_pmd_mkwrite()
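
Only lines containing "pmd" appear above, so the guard between lines 1073 and 1076 is elided. A minimal reconstruction of the helper, assuming the elided condition is the usual VM_WRITE check mirroring the pte-level maybe_mkwrite():

    pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            /* Only grant write access if the VMA itself is writable. */
            if (likely(vma->vm_flags & VM_WRITE))
                    pmd = pmd_mkwrite(pmd, vma);
            return pmd;
    }
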
1296 void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd, in map_anon_folio_pmd_nopf() argument
1305 set_pmd_at(vma->vm_mm, haddr, pmd, entry); in map_anon_folio_pmd_nopf()
1306 update_mmu_cache_pmd(vma, haddr, pmd); in map_anon_folio_pmd_nopf()
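
map_anon_folio_pmd_nopf() ends with the set_pmd_at()/update_mmu_cache_pmd() pair shown above; the lines that build the entry (1297-1304) are elided. A sketch of the assumed shape, using folio_mk_pmd() and the maybe_pmd_mkwrite() helper from line 1073; the rmap and LRU calls are assumptions, not shown in the listing:

    /* Build a dirty, possibly writable huge entry for a new
     * anonymous folio and install it at the aligned address. */
    pmd_t entry = folio_mk_pmd(folio, vma->vm_page_prot);

    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); /* assumed */
    folio_add_lru_vma(folio, vma);                              /* assumed */
    set_pmd_at(vma->vm_mm, haddr, pmd, entry);
    update_mmu_cache_pmd(vma, haddr, pmd);
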
1310 static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd, in map_anon_folio_pmd_pf() argument
1313 map_anon_folio_pmd_nopf(folio, pmd, vma, haddr); in map_anon_folio_pmd_pf()
1338 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1339 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
1355 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
1356 map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
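
Lines 1338-1356 show the core install pattern for a freshly allocated anonymous THP: take the PMD lock, recheck that the slot is still empty, deposit a preallocated page table (so a later split cannot fail on allocation), then map the folio. A condensed sketch; the back-out path and label are assumed:

    vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    if (unlikely(!pmd_none(*vmf->pmd))) {
            /* Raced with a concurrent fault: assumed back-out. */
            spin_unlock(vmf->ptl);
            goto release;                   /* assumed label */
    }
    /* Stash a page table so a future split cannot fail on allocation. */
    pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
    map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
    spin_unlock(vmf->ptl);
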
1386 ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_device_private()
1387 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) { in do_huge_pmd_device_private()
1447 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_folio() argument
1453 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_folio()
1454 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_folio()
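
set_huge_zero_folio() follows the same deposit-then-set pattern for the shared huge zero folio; the construction of the read-only entry (around lines 1448-1452) is elided. The assumed shape, with folio_mk_pmd() chosen by analogy with the rest of this file:

    /* Map the shared huge zero folio read-only; the entry
     * construction via folio_mk_pmd() is an assumption. */
    pmd_t entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);

    pgtable_trans_huge_deposit(mm, pmd, pgtable);
    set_pmd_at(mm, haddr, pmd, entry);
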
1487 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
1489 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
1501 haddr, vmf->pmd, zero_folio); in do_huge_pmd_anonymous_page()
1502 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
1524 pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot, in insert_pmd() argument
1541 ptl = pmd_lock(mm, pmd); in insert_pmd()
1542 if (!pmd_none(*pmd)) { in insert_pmd()
1547 if (pmd_pfn(*pmd) != pfn) { in insert_pmd()
1548 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pmd()
1551 entry = pmd_mkyoung(*pmd); in insert_pmd()
1553 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pmd()
1554 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1579 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pmd()
1584 set_pmd_at(mm, addr, pmd, entry); in insert_pmd()
1585 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1626 return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write); in vmf_insert_pfn_pmd()
1643 return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write); in vmf_insert_folio_pmd()
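
insert_pmd() (lines 1524-1585) backs both vmf_insert_pfn_pmd() and vmf_insert_folio_pmd(). Its populated-slot branch at lines 1542-1554 is worth spelling out: if the PMD already maps the same pfn, only the access bits are refreshed rather than re-installing the entry. A sketch of that branch, with the elided control flow and labels assumed:

    ptl = pmd_lock(mm, pmd);
    if (!pmd_none(*pmd)) {
            if (pmd_pfn(*pmd) != pfn) {
                    /* Only the huge zero page may legitimately be here. */
                    WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
                    goto out_unlock;                        /* assumed label */
            }
            /* Same pfn already mapped: just refresh access bits. */
            entry = pmd_mkyoung(*pmd);
            entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); /* assumed */
            if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
                    update_mmu_cache_pmd(vma, addr, pmd);
            goto out_unlock;
    }
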
1774 pmd_t *pmd, bool write) in touch_pmd() argument
1778 entry = pmd_mkyoung(*pmd); in touch_pmd()
1782 pmd, entry, write)) { in touch_pmd()
1783 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
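
touch_pmd() is nearly complete in the listing; only the dirty-on-write step and the address masking are elided. A reconstruction with those two details assumed:

    static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd, bool write)
    {
            pmd_t entry;

            entry = pmd_mkyoung(*pmd);
            if (write)
                    entry = pmd_mkdirty(entry);             /* assumed */
            if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                      pmd, entry, write)) {
                    update_mmu_cache_pmd(vma, addr, pmd);
            }
    }
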
1794 pmd_t pmd, pgtable_t pgtable) in copy_huge_non_present_pmd() argument
1796 softleaf_t entry = softleaf_from_pmd(pmd); in copy_huge_non_present_pmd()
1799 VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(pmd)); in copy_huge_non_present_pmd()
1804 pmd = swp_entry_to_pmd(entry); in copy_huge_non_present_pmd()
1806 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_non_present_pmd()
1808 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_non_present_pmd()
1809 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_non_present_pmd()
1817 pmd = swp_entry_to_pmd(entry); in copy_huge_non_present_pmd()
1820 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_non_present_pmd()
1822 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_non_present_pmd()
1823 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_non_present_pmd()
1842 pmd = pmd_swp_clear_uffd_wp(pmd); in copy_huge_non_present_pmd()
1843 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_non_present_pmd()
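
copy_huge_non_present_pmd() copies migration-style (non-present) entries at fork time. The two set_pmd_at(src_mm, ...) writes at lines 1809 and 1823 rewrite the *source* entry: a writable migration entry must be downgraded to read-only before parent and child share it, keeping the soft-dirty and uffd-wp bits intact. A sketch of one downgrade arm, using the classic migration-entry helpers; the softleaf-based predicates this function actually uses are elided from the listing:

    if (is_writable_migration_entry(entry)) {       /* assumed predicate */
            /* Drop write permission so CoW works after fork. */
            entry = make_readable_migration_entry(swp_offset(entry));
            pmd = swp_entry_to_pmd(entry);
            if (pmd_swp_soft_dirty(*src_pmd))
                    pmd = pmd_swp_mksoft_dirty(pmd);
            if (pmd_swp_uffd_wp(*src_pmd))
                    pmd = pmd_swp_mkuffd_wp(pmd);
            set_pmd_at(src_mm, addr, src_pmd, pmd);
    }
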
1853 pmd_t pmd; in copy_huge_pmd() local
1857 pmd = pmdp_get_lockless(src_pmd); in copy_huge_pmd()
1858 if (unlikely(pmd_present(pmd) && pmd_special(pmd) && in copy_huge_pmd()
1859 !is_huge_zero_pmd(pmd))) { in copy_huge_pmd()
1872 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1889 pmd = *src_pmd; in copy_huge_pmd()
1892 pmd_is_valid_softleaf(pmd))) { in copy_huge_pmd()
1894 dst_vma, src_vma, pmd, pgtable); in copy_huge_pmd()
1899 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1908 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1918 src_page = pmd_page(pmd); in copy_huge_pmd()
1938 pmd = pmd_clear_uffd_wp(pmd); in copy_huge_pmd()
1939 pmd = pmd_wrprotect(pmd); in copy_huge_pmd()
1941 pmd = pmd_mkold(pmd); in copy_huge_pmd()
1942 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
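
The tail of copy_huge_pmd() (lines 1938-1942) makes the shared mapping safe for copy-on-write: the entry is write-protected and aged for the child, and uffd-wp is dropped when the destination VMA does not track it. Condensed, with the guard assumed:

    if (!userfaultfd_wp(dst_vma))           /* assumed guard */
            pmd = pmd_clear_uffd_wp(pmd);
    pmd = pmd_wrprotect(pmd);       /* both sides now fault on write */
    pmd = pmd_mkold(pmd);
    set_pmd_at(dst_mm, addr, dst_pmd, pmd);
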
2019 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
2022 return touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
2040 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_zero_wp_pmd()
2041 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) in do_huge_zero_wp_pmd()
2046 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); in do_huge_zero_wp_pmd()
2047 map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr); in do_huge_zero_wp_pmd()
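
do_huge_zero_wp_pmd() replaces the shared huge zero page with a private folio on a write fault. A condensed sketch of lines 2040-2047; the failure path and final unlock are assumed:

    vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
            /* Entry changed between the lockless read and the lock:
             * assumed back-out. */
            spin_unlock(vmf->ptl);
            return 0;
    }
    /* Flush the zero mapping, then install the private folio. */
    (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
    map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
    spin_unlock(vmf->ptl);
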
2066 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
2081 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
2099 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
2137 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
2138 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
2147 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in do_huge_pmd_wp_page()
2152 unsigned long addr, pmd_t pmd) in can_change_pmd_writable() argument
2160 if (pmd_protnone(pmd)) in can_change_pmd_writable()
2164 if (pmd_needs_soft_dirty_wp(vma, pmd)) in can_change_pmd_writable()
2168 if (userfaultfd_huge_pmd_wp(vma, pmd)) in can_change_pmd_writable()
2173 page = vm_normal_page_pmd(vma, addr, pmd); in can_change_pmd_writable()
2178 return pmd_dirty(pmd); in can_change_pmd_writable()
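
can_change_pmd_writable() decides whether mprotect/NUMA code may map the entry writable without forcing a write fault. The listing shows the deny conditions; the private-mapping check between lines 2173 and 2178 is elided. A reconstruction, with the anon-exclusive test assumed by analogy with can_change_pte_writable():

    static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
                                               unsigned long addr, pmd_t pmd)
    {
            struct page *page;

            /* NUMA-hinting entries are not even readable yet. */
            if (pmd_protnone(pmd))
                    return false;
            /* Soft-dirty or uffd-wp tracking still needs write faults. */
            if (pmd_needs_soft_dirty_wp(vma, pmd))
                    return false;
            if (userfaultfd_huge_pmd_wp(vma, pmd))
                    return false;
            if (!(vma->vm_flags & VM_SHARED)) {
                    /* Assumed: only exclusive anon pages are safe. */
                    page = vm_normal_page_pmd(vma, addr, pmd);
                    return page && PageAnon(page) && PageAnonExclusive(page);
            }
            /* Shared: writable only if no dirtying fault is needed. */
            return pmd_dirty(pmd);
    }
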
2189 pmd_t pmd, old_pmd; in do_huge_pmd_numa_page() local
2193 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2194 old_pmd = pmdp_get(vmf->pmd); in do_huge_pmd_numa_page()
2201 pmd = pmd_modify(old_pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
2207 writable = pmd_write(pmd); in do_huge_pmd_numa_page()
2209 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
2212 folio = vm_normal_folio_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
2238 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2239 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { in do_huge_pmd_numa_page()
2245 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); in do_huge_pmd_numa_page()
2246 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
2248 pmd = pmd_mkwrite(pmd, vma); in do_huge_pmd_numa_page()
2249 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
2250 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
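
If do_huge_pmd_numa_page() decides not to migrate (or migration fails), lines 2245-2250 rebuild a present entry with the VMA's normal protections, restore the accessed bit, and regrant write access when the earlier can_change_pmd_writable() check allowed it. The guard around pmd_mkwrite() is assumed:

    pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
    pmd = pmd_mkyoung(pmd);
    if (writable)           /* computed at lines 2207-2209 */
            pmd = pmd_mkwrite(pmd, vma);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
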
2263 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
2273 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
2277 orig_pmd = *pmd; in madvise_free_huge_pmd()
2316 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
2320 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
2321 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
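
For MADV_FREE, lines 2316-2321 launder the entry so reclaim can discard the pages lazily: invalidate the PMD, strip the dirty and accessed bits from the orig_pmd read at line 2277, and write it back. The bit-clearing calls between pmdp_invalidate() and set_pmd_at() are elided; the assumed sequence:

    pmdp_invalidate(vma, addr, pmd);    /* serialize against HW A/D updates */
    orig_pmd = pmd_mkold(orig_pmd);     /* assumed */
    orig_pmd = pmd_mkclean(orig_pmd);   /* assumed */
    set_pmd_at(mm, addr, pmd, orig_pmd);
    tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
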
2332 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
2336 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
2342 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
2349 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
2358 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
2361 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
2364 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2368 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2392 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2396 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2437 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
2440 if (unlikely(pmd_is_migration_entry(pmd))) in move_soft_dirty_pmd()
2441 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2442 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
2443 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2446 return pmd; in move_soft_dirty_pmd()
2449 static pmd_t clear_uffd_wp_pmd(pmd_t pmd) in clear_uffd_wp_pmd() argument
2451 if (pmd_none(pmd)) in clear_uffd_wp_pmd()
2452 return pmd; in clear_uffd_wp_pmd()
2453 if (pmd_present(pmd)) in clear_uffd_wp_pmd()
2454 pmd = pmd_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2456 pmd = pmd_swp_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2458 return pmd; in clear_uffd_wp_pmd()
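
move_soft_dirty_pmd() and clear_uffd_wp_pmd() are small bit-fixup helpers for mremap; each must pick the present or swap flavour of the bit operation. clear_uffd_wp_pmd() is nearly complete in the listing; the reconstruction below assumes the elided branch at line 2455 selects the non-present case:

    static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
    {
            if (pmd_none(pmd))
                    return pmd;
            if (pmd_present(pmd))
                    pmd = pmd_clear_uffd_wp(pmd);
            else    /* swap-style (non-present) entry; condition assumed */
                    pmd = pmd_swp_clear_uffd_wp(pmd);
            return pmd;
    }
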
2465 pmd_t pmd; in move_huge_pmd() local
2488 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
2489 if (pmd_present(pmd)) in move_huge_pmd()
2498 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
2500 pmd = clear_uffd_wp_pmd(pmd); in move_huge_pmd()
2501 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
2513 unsigned long addr, pmd_t *pmd, bool uffd_wp, in change_non_present_huge_pmd() argument
2516 softleaf_t entry = softleaf_from_pmd(*pmd); in change_non_present_huge_pmd()
2520 VM_WARN_ON(!pmd_is_valid_softleaf(*pmd)); in change_non_present_huge_pmd()
2531 if (pmd_swp_soft_dirty(*pmd)) in change_non_present_huge_pmd()
2537 newpmd = *pmd; in change_non_present_huge_pmd()
2544 if (!pmd_same(*pmd, newpmd)) in change_non_present_huge_pmd()
2545 set_pmd_at(mm, addr, pmd, newpmd); in change_non_present_huge_pmd()
2556 pmd_t *pmd, unsigned long addr, pgprot_t newprot, in change_huge_pmd() argument
2572 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
2576 if (thp_migration_supported() && pmd_is_valid_softleaf(*pmd)) { in change_huge_pmd()
2577 change_non_present_huge_pmd(mm, addr, pmd, uffd_wp, in change_huge_pmd()
2589 if (is_huge_zero_pmd(*pmd)) in change_huge_pmd()
2592 if (pmd_protnone(*pmd)) in change_huge_pmd()
2595 if (!folio_can_map_prot_numa(pmd_folio(*pmd), vma, in change_huge_pmd()
2620 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
2639 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
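
change_huge_pmd() applies mprotect to a mapped THP with an invalidate-modify-set sequence (lines 2620-2639) so that concurrent hardware accessed/dirty updates cannot be lost between reading and writing the entry:

    oldpmd = pmdp_invalidate_ad(vma, addr, pmd);    /* keeps A/D intact */
    entry = pmd_modify(oldpmd, newprot);
    /* uffd-wp and write-permission fixups (lines 2621-2638) elided */
    set_pmd_at(mm, addr, pmd, entry);
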
2821 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
2825 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
2826 if (likely(pmd_is_huge(*pmd))) in __pmd_trans_huge_lock()
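
__pmd_trans_huge_lock() is almost fully visible; only the failure path is elided. Reconstruction, assuming the conventional unlock-and-return-NULL tail:

    spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
    {
            spinlock_t *ptl;

            ptl = pmd_lock(vma->vm_mm, pmd);
            if (likely(pmd_is_huge(*pmd)))
                    return ptl;     /* caller now holds the PMD lock */
            spin_unlock(ptl);
            return NULL;            /* not huge: nothing to lock against */
    }
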
2944 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2961 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2963 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2981 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2984 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
3002 VM_WARN_ON_ONCE(!pmd_is_valid_softleaf(*pmd) && !pmd_trans_huge(*pmd)); in __split_huge_pmd_locked()
3007 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_pmd_locked()
3013 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
3036 if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
3046 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
3049 if (pmd_is_migration_entry(*pmd)) { in __split_huge_pmd_locked()
3052 old_pmd = *pmd; in __split_huge_pmd_locked()
3065 } else if (pmd_is_device_private_entry(*pmd)) { in __split_huge_pmd_locked()
3068 old_pmd = *pmd; in __split_huge_pmd_locked()
3121 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
3170 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
3256 if (!pmd_is_migration_entry(*pmd)) in __split_huge_pmd_locked()
3262 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
3266 pmd_t *pmd, bool freeze) in split_huge_pmd_locked() argument
3269 if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd)) in split_huge_pmd_locked()
3270 __split_huge_pmd_locked(vma, pmd, address, freeze); in split_huge_pmd_locked()
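
split_huge_pmd_locked() is a thin guard around __split_huge_pmd_locked(); the reconstruction follows directly from lines 3266-3270 and the call at line 3284:

    static void split_huge_pmd_locked(struct vm_area_struct *vma,
                                      unsigned long address, pmd_t *pmd,
                                      bool freeze)
    {
            /* Split only entries that are huge-mapped or valid
             * non-present (softleaf) huge entries. */
            if (pmd_trans_huge(*pmd) || pmd_is_valid_softleaf(*pmd))
                    __split_huge_pmd_locked(vma, pmd, address, freeze);
    }
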
3273 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
3283 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
3284 split_huge_pmd_locked(vma, range.start, pmd, freeze); in __split_huge_pmd()
3292 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address() local
3294 if (!pmd) in split_huge_pmd_address()
3297 __split_huge_pmd(vma, pmd, address, freeze); in split_huge_pmd_address()
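
split_huge_pmd_address() similarly reduces to a lookup plus delegation (lines 3292-3297); the signature is assumed from the call pattern:

    static void split_huge_pmd_address(struct vm_area_struct *vma,
                                       unsigned long address, bool freeze)
    {
            pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);

            if (!pmd)
                    return;
            __split_huge_pmd(vma, pmd, address, freeze);
    }
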
4865 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
4869 if (unlikely(!pmd_present(*pvmw->pmd))) in set_pmd_migration_entry()
4870 pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd); in set_pmd_migration_entry()
4872 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
4877 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
4898 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
4916 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
4919 entry = softleaf_from_pmd(*pvmw->pmd); in remove_migration_pmd()
4923 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
4927 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
4946 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
4948 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
4963 set_pmd_at(mm, haddr, pvmw->pmd, pmde); in remove_migration_pmd()
4966 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
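
remove_migration_pmd() is the inverse of set_pmd_migration_entry(): it decodes the softleaf entry, rebuilds a present huge entry, and installs it with the final set_pmd_at()/update_mmu_cache_pmd() pair shown above. The bit-preservation step is visible at lines 4946-4948; a sketch with the entry construction assumed:

    /* Rebuild a present entry for the migrated-in folio, carrying the
     * soft-dirty and uffd-wp state across the migration entry. */
    pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));   /* assumed */
    if (pmd_swp_soft_dirty(*pvmw->pmd))
            pmde = pmd_mksoft_dirty(pmde);
    if (pmd_swp_uffd_wp(*pvmw->pmd))
            pmde = pmd_mkuffd_wp(pmde);
    set_pmd_at(mm, haddr, pvmw->pmd, pmde);
    update_mmu_cache_pmd(vma, address, pvmw->pmd);
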