/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#ifndef __ASSEMBLY__
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

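/*
 * Illustrative note: pte_modify() below relies on this mask, so a
 * protection change keeps the PFN together with the dirty, accessed
 * and special bits.
 */
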
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

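/*
 * Illustrative note: the no-cache + guarded combination (PAGE_KERNEL_NCG)
 * is the usual choice for MMIO mappings, where both caching and
 * speculative access must be prevented.
 */
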
#ifndef __ASSEMBLY__

extern int icache_44x_need_flush;

#ifndef pte_huge_size
static inline unsigned long pte_huge_size(pte_t pte)
{
	return PAGE_SIZE;
}
#endif

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef pte_update
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	unsigned long sz;
	unsigned long pdsize;
	int i;

	if (new == old)
		return old;

	if (huge)
		sz = pte_huge_size(__pte(old));
	else
		sz = PAGE_SIZE;

	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;
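
	/*
	 * Illustrative example (editorial note): with 4K base pages and a
	 * hypothetical 512K huge page, sz < PMD_SIZE so pdsize is
	 * PAGE_SIZE, and the loop below writes sz / pdsize = 128
	 * consecutive PTEs, advancing the RPN by one page each iteration.
	 */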
	for (i = 0; i < sz / pdsize; i++, p++) {
		*p = __pte(new);
		if (new)
			new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT;
	}

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
#endif

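/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the accessors below all follow this pattern, e.g. clearing the dirty
 * bit of a regular (non-huge) PTE via pte_update().
 */
static inline pte_basic_t example_pte_clean_inplace(struct mm_struct *mm,
						    unsigned long addr, pte_t *ptep)
{
	return pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);
}
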
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif

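/*
 * Editorial note: the helper above is normally reached through the
 * generic ptep_set_access_flags() wrapper, typically from the page
 * fault path when a fault on a valid PTE only needs the dirty,
 * accessed, write or exec bits added.
 */
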
/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any non-zero bits in _PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ.  Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_read
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * We also have _PAGE_READ set for WRITE mappings.
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

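/*
 * Editorial note: pte_access_permitted() is what lockless page table
 * walkers such as the fast GUP path use to decide whether a mapping
 * may be accessed without taking the page table lock.
 */
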
/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }

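/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * building a cacheable kernel read-write PTE for a given pfn.
 */
static inline pte_t example_mk_kernel_pte(unsigned long pfn)
{
	return pfn_pte(pfn, PAGE_KERNEL);
}
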
/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

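/*
 * Illustrative sketch (hypothetical helper): since pte_modify() keeps
 * the bits in _PAGE_CHG_MASK, downgrading a dirty PTE to read-only
 * preserves its PFN and its dirty, accessed and special bits.
 */
static inline pte_t example_make_kernel_ro(pte_t pte)
{
	return pte_modify(pte, PAGE_KERNEL_RO);
}
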
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* First case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

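/*
 * Illustrative sketch (hypothetical helper): callers normally reach
 * __set_pte_at() through set_pte_at(); a regular (non-percpu)
 * insertion boils down to:
 */
static inline void example_install_pte(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pte_t pte)
{
	__set_pte_at(mm, addr, ptep, pte, 0);
}
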
/*
 * Macros to alter the cacheability of a page protection value.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
				            _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

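/*
 * Illustrative sketch (hypothetical helper): deriving a write-combining
 * protection, e.g. for a frame buffer, from the normal kernel one.
 */
static inline pgprot_t example_wc_prot(void)
{
	return pgprot_writecombine(PAGE_KERNEL);
}
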
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

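/*
 * Illustrative sketch (hypothetical helper): mapping one page of MMIO
 * registers uncached and guarded using the declaration above.
 */
static inline int example_map_mmio_page(unsigned long va, phys_addr_t pa)
{
	return map_kernel_page(va, pa, PAGE_KERNEL_NCG);
}
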
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */