/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLER__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/* Make module code happy. We don't set RO yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLER__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

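/*
 * set_ptes() installs @nr PTEs starting at @ptep, mapping @nr
 * consecutive pages starting at pte_pfn(@pte).  set_pte_at_unchecked()
 * sets a PTE without the checks performed by set_pte_at() and is
 * intended for kernel page table setup.
 */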
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep this as a macro to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
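
/*
 * Illustrative use only: together with pfn_pte() from the subarch
 * headers, a PTE can be rebuilt with the same protections but a new
 * pfn, e.g. new = pfn_pte(pfn, pte_pgprot(old));
 */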

#ifdef CONFIG_PPC64
#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}
#endif /* CONFIG_PPC64 */

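/* Return @prot with execute permission removed. */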
static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

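/* Kernel virtual address of the page table that @pmd points to. */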
#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

#ifdef CONFIG_PPC32
void __init *early_alloc_pgtable(unsigned long size);
#endif
pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

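/*
 * Remove execute permission from the init text once init memory has
 * been freed; a no-op on platforms that don't need it.
 */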
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

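/*
 * Update the access/dirty flags of an existing PTE; returns non-zero
 * if the entry was changed.
 */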
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
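/*
 * Used by /dev/mem and similar drivers to choose protection attributes
 * when mapping a physical address range.
 */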
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

/*
 * When PTE fragments are used, PTE_FRAG_NR is defined in the subarch
 * pgtable.h, which has already been included by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
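/*
 * Fallback when the subarch does not use PTE fragments: a "fragment"
 * is a full page and the per-context fragment pointer is unused.
 */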
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

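/* The page table backing @pmd, returned as a pgtable_t. */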
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory() details some of
 * the restrictions. We don't check for PMD_SIZE because our vmemmap
 * allocation code can fall back correctly. The pageblock alignment
 * requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, things align nicely for
	 * memory block sizes of 128MB and up.  Hence require PMD_SIZE
	 * alignment here.
	 */
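	/*
	 * Illustrative arithmetic, assuming a 64-byte struct page: a
	 * 128MB block holds 32768 4K pages, so its vmemmap is
	 * 32768 * 64 = 2MB, which is exactly PMD_SIZE aligned.
	 */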
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

#endif /* CONFIG_PPC64 */

#ifndef pmd_user_accessible_page
#define pmd_user_accessible_page(pmd, addr)	false
#endif

#ifndef pud_user_accessible_page
#define pud_user_accessible_page(pud, addr)	false
#endif

#endif /* __ASSEMBLER__ */

#endif /* _ASM_POWERPC_PGTABLE_H */