xref: /linux/arch/powerpc/include/asm/book3s/pgtable.h (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_PGTABLE_H

#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgtable.h>
#else
#include <asm/book3s/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__
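/*
 * Book3S provides its own ptep_set_access_flags(): it sets the
 * access/dirty bits in an existing PTE during a fault and returns
 * non-zero if the entry was actually changed.
 */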
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

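/*
 * phys_mem_access_prot() chooses the page protection used when
 * physical memory is mapped to userspace (e.g. through /dev/mem),
 * typically so that non-RAM ranges get a non-cacheable mapping.
 */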
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

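/*
 * Hash-MMU worker behind update_mmu_cache_range() below: preloads the
 * corresponding HPTE into the hash table, as described in the comment
 * that follows.
 */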
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/* 32-bit Book3S without a hash table has no HPTE to preload. */
	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/* The radix MMU does not use the hash table at all. */
	if (radix_enabled())
		return;
	__update_mmu_cache(vma, address, ptep);
}
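
/*
 * A minimal sketch (not part of this header) of how the generic fault
 * path is expected to reach this hook, assuming the set_pte_range()
 * flow from mm/memory.c: after the new PTE(s) are installed with
 * set_ptes(), the arch hook runs so Book3S can preload the matching
 * HPTE(s):
 *
 *	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
 *	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
 */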

#endif /* __ASSEMBLY__ */
#endif