// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/mm_inline.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to upper
 * level even when folded
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
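
/*
 * Sketch of how the helpers above are typically reached (illustrative
 * only): walkers test entries with the p?d_none_or_clear_bad() macros,
 * which report and clear a corrupted entry, then treat it as empty so
 * the walk can continue, e.g.
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		...
 *	} while (pmd++, addr = next, addr != end);
 */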

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
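
/*
 * The transparent-hugepage helpers below flush with flush_pmd_tlb_range()
 * and flush_pud_tlb_range(). Architectures that don't define
 * __HAVE_ARCH_FLUSH_PMD_TLB_RANGE get the generic fallback from
 * <linux/pgtable.h>, which simply maps both to flush_tlb_range().
 */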

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
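
/*
 * Lifecycle sketch for the deposit/withdraw pair above (illustrative,
 * simplified from the THP fault and zap paths; "huge_pmd" stands in for
 * however the caller builds the huge entry):
 *
 *	pgtable = pte_alloc_one(mm);
 *	...
 *	set_pmd_at(mm, haddr, pmdp, huge_pmd);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *
 * and when the huge mapping is zapped or split:
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	pte_free(mm, pgtable);
 *
 * Keeping a deposited pte table per huge pmd means a later split never
 * has to allocate memory, so it cannot fail.
 */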

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
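
/*
 * Usage sketch for pmdp_collapse_flush() (illustrative, loosely based on
 * the collapse path in mm/khugepaged.c; "hpage" and the huge-pmd
 * construction are stand-ins):
 *
 *	pmd = pmdp_collapse_flush(vma, address, pmdp);
 *	... copy the small pages into the new huge page hpage ...
 *	set_pmd_at(mm, address, pmdp,
 *		   pmd_mkhuge(mk_pmd(hpage, vma->vm_page_prot)));
 *
 * Because the cleared pmd pointed to a table of ptes, it is the
 * pte-level TLB entries that must be shot down, hence flush_tlb_range()
 * rather than flush_pmd_tlb_range() above; the real code also handles
 * the deposited page table and the mmu notifiers.
 */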