Lines Matching full:pmd

782  * Default to setting PMD-sized THP to inherit the global setting and in hugepage_init_sysfs()
1072 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
1075 pmd = pmd_mkwrite(pmd, vma); in maybe_pmd_mkwrite()
1076 return pmd; in maybe_pmd_mkwrite()
1220 static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd, in map_anon_folio_pmd() argument
1229 set_pmd_at(vma->vm_mm, haddr, pmd, entry); in map_anon_folio_pmd()
1230 update_mmu_cache_pmd(vma, haddr, pmd); in map_anon_folio_pmd()
1255 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1256 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
1272 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
1273 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
1327 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_folio() argument
1333 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_folio()
1334 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_folio()
1367 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
1369 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
1381 haddr, vmf->pmd, zero_folio); in do_huge_pmd_anonymous_page()
1382 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
1404 pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot, in insert_pmd() argument
1421 ptl = pmd_lock(mm, pmd); in insert_pmd()
1422 if (!pmd_none(*pmd)) { in insert_pmd()
1427 if (pmd_pfn(*pmd) != pfn) { in insert_pmd()
1428 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pmd()
1431 entry = pmd_mkyoung(*pmd); in insert_pmd()
1433 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pmd()
1434 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1459 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pmd()
1464 set_pmd_at(mm, addr, pmd, entry); in insert_pmd()
1465 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1475 * vmf_insert_pfn_pmd - insert a pmd size pfn
1480 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1506 return insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write); in vmf_insert_pfn_pmd()
1523 return insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, write); in vmf_insert_folio_pmd()
1645 pmd_t *pmd, bool write) in touch_pmd() argument
1649 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
1653 pmd, _pmd, write)) in touch_pmd()
1654 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
1664 pmd_t pmd; in copy_huge_pmd() local
1668 pmd = pmdp_get_lockless(src_pmd); in copy_huge_pmd()
1669 if (unlikely(pmd_present(pmd) && pmd_special(pmd) && in copy_huge_pmd()
1670 !is_huge_zero_pmd(pmd))) { in copy_huge_pmd()
1675 * No need to recheck the pmd, it can't change with write in copy_huge_pmd()
1683 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1700 pmd = *src_pmd; in copy_huge_pmd()
1703 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
1704 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
1706 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
1710 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
1712 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
1714 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_pmd()
1715 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
1721 pmd = pmd_swp_clear_uffd_wp(pmd); in copy_huge_pmd()
1722 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1728 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1733 * When page table lock is held, the huge zero pmd should not be in copy_huge_pmd()
1734 * under splitting since we don't split the page itself, only pmd to in copy_huge_pmd()
1737 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1747 src_page = pmd_page(pmd); in copy_huge_pmd()
1767 pmd = pmd_clear_uffd_wp(pmd); in copy_huge_pmd()
1768 pmd = pmd_wrprotect(pmd); in copy_huge_pmd()
1770 pmd = pmd_mkold(pmd); in copy_huge_pmd()
1771 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1848 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1849 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
1852 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1873 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_zero_wp_pmd()
1874 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) in do_huge_zero_wp_pmd()
1879 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); in do_huge_zero_wp_pmd()
1880 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in do_huge_zero_wp_pmd()
1899 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1908 /* Fallback to splitting PMD if THP cannot be allocated */ in do_huge_pmd_wp_page()
1914 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1932 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1970 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1971 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1980 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in do_huge_pmd_wp_page()
1985 unsigned long addr, pmd_t pmd) in can_change_pmd_writable() argument
1993 if (pmd_protnone(pmd)) in can_change_pmd_writable()
1997 if (pmd_needs_soft_dirty_wp(vma, pmd)) in can_change_pmd_writable()
2001 if (userfaultfd_huge_pmd_wp(vma, pmd)) in can_change_pmd_writable()
2006 page = vm_normal_page_pmd(vma, addr, pmd); in can_change_pmd_writable()
2011 return pmd_dirty(pmd); in can_change_pmd_writable()
2022 pmd_t pmd, old_pmd; in do_huge_pmd_numa_page() local
2026 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2027 old_pmd = pmdp_get(vmf->pmd); in do_huge_pmd_numa_page()
2034 pmd = pmd_modify(old_pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
2037 * Detect now whether the PMD could be writable; this information in do_huge_pmd_numa_page()
2040 writable = pmd_write(pmd); in do_huge_pmd_numa_page()
2042 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
2045 folio = vm_normal_folio_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
2071 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2072 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { in do_huge_pmd_numa_page()
2077 /* Restore the PMD */ in do_huge_pmd_numa_page()
2078 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); in do_huge_pmd_numa_page()
2079 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
2081 pmd = pmd_mkwrite(pmd, vma); in do_huge_pmd_numa_page()
2082 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
2083 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
2092 * Return true if we do MADV_FREE successfully on entire pmd page.
2096 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
2106 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
2110 orig_pmd = *pmd; in madvise_free_huge_pmd()
2149 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
2153 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
2154 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
2165 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
2169 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
2175 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
2182 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
2191 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
2194 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
2197 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2201 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2222 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); in zap_huge_pmd()
2225 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2229 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2234 * Use flush_needed to indicate whether the PMD entry in zap_huge_pmd()
2255 * With split pmd lock we also need to move preallocated in pmd_move_must_withdraw()
2256 * PTE page table if new_pmd is on different PMD page table. in pmd_move_must_withdraw()
2264 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
2267 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
2268 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2269 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
2270 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2272 return pmd; in move_soft_dirty_pmd()
2275 static pmd_t clear_uffd_wp_pmd(pmd_t pmd) in clear_uffd_wp_pmd() argument
2277 if (pmd_present(pmd)) in clear_uffd_wp_pmd()
2278 pmd = pmd_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2279 else if (is_swap_pmd(pmd)) in clear_uffd_wp_pmd()
2280 pmd = pmd_swp_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2282 return pmd; in clear_uffd_wp_pmd()
2289 pmd_t pmd; in move_huge_pmd() local
2294 * The destination pmd shouldn't be established, free_pgtables() in move_huge_pmd()
2312 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
2313 if (pmd_present(pmd)) in move_huge_pmd()
2322 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
2324 pmd = clear_uffd_wp_pmd(pmd); in move_huge_pmd()
2325 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
2338 * - 0 if PMD could not be locked
2339 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2344 pmd_t *pmd, unsigned long addr, pgprot_t newprot, in change_huge_pmd() argument
2360 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
2365 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
2366 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
2370 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
2381 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
2384 newpmd = *pmd; in change_huge_pmd()
2391 if (!pmd_same(*pmd, newpmd)) in change_huge_pmd()
2392 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
2405 if (is_huge_zero_pmd(*pmd)) in change_huge_pmd()
2408 if (pmd_protnone(*pmd)) in change_huge_pmd()
2411 folio = pmd_folio(*pmd); in change_huge_pmd()
2427 * to not clear pmd intermittently to avoid race with MADV_DONTNEED in change_huge_pmd()
2435 * pmd_trans_huge(*pmd) == 0 (without ptl) in change_huge_pmd()
2436 * // skip the pmd in change_huge_pmd()
2438 * // pmd is re-established in change_huge_pmd()
2440 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it in change_huge_pmd()
2446 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
2465 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
2662 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
2667 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
2670 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
2671 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))) in __pmd_trans_huge_lock()
2789 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2799 * Leave pmd empty until pte is filled note that it is fine to delay in __split_huge_zero_page_pmd()
2801 * replacing a zero pmd write protected page with a zero pte write in __split_huge_zero_page_pmd()
2806 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2808 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2825 smp_wmb(); /* make pte visible before pmd */ in __split_huge_zero_page_pmd()
2826 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2829 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2846 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)); in __split_huge_pmd_locked()
2851 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_pmd_locked()
2857 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2881 if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2891 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2894 pmd_migration = is_pmd_migration_entry(*pmd); in __split_huge_pmd_locked()
2898 old_pmd = *pmd; in __split_huge_pmd_locked()
2910 * Up to this point the pmd is present and huge and userland has in __split_huge_pmd_locked()
2912 * happens in place). If we overwrite the pmd with the not-huge in __split_huge_pmd_locked()
2925 * flush_pmd_tlb_range();" we first mark the current pmd in __split_huge_pmd_locked()
2927 * remain set at all times on the pmd until the split is in __split_huge_pmd_locked()
2928 * complete for this pmd), then we flush the SMP TLB and finally in __split_huge_pmd_locked()
2929 * we write the non-huge version of the pmd entry with in __split_huge_pmd_locked()
2932 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2948 * Without "freeze", we'll simply split the PMD, propagating the in __split_huge_pmd_locked()
2957 * In case we cannot clear PageAnonExclusive(), split the PMD in __split_huge_pmd_locked()
2960 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. in __split_huge_pmd_locked()
2978 * Withdraw the table only after we mark the pmd entry invalid. in __split_huge_pmd_locked()
2981 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
3046 smp_wmb(); /* make pte visible before pmd */ in __split_huge_pmd_locked()
3047 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
3051 pmd_t *pmd, bool freeze) in split_huge_pmd_locked() argument
3054 if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd)) in split_huge_pmd_locked()
3055 __split_huge_pmd_locked(vma, pmd, address, freeze); in split_huge_pmd_locked()
3058 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
3068 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
3069 split_huge_pmd_locked(vma, range.start, pmd, freeze); in __split_huge_pmd()
3077 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address() local
3079 if (!pmd) in split_huge_pmd_address()
3082 __split_huge_pmd(vma, pmd, address, freeze); in split_huge_pmd_address()
3089 * contain an hugepage: check if we need to split an huge pmd. in split_huge_pmd_if_needed()
3155 * - clear PMD; barrier; read refcount in __discard_anon_folio_pmd_locked()
3156 * - inc refcount; barrier; read PMD in __discard_anon_folio_pmd_locked()
3170 * If the folio or its PMD is redirtied at this point, or if there in __discard_anon_folio_pmd_locked()
3521 /* Only swapping a whole PMD-mapped folio is supported */ in non_uniform_split_supported()
4592 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
4596 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
4598 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */ in set_pmd_migration_entry()
4601 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
4622 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
4640 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
4643 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
4646 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
4650 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
4669 set_pmd_at(mm, haddr, pvmw->pmd, pmde); in remove_migration_pmd()
4672 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()