/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set it read-only yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
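
/*
 * Semantics sketch (illustrative, not the implementation): set_ptes()
 * installs @nr consecutive PTEs for one folio, stepping the pfn one
 * page at a time, roughly:
 *
 *	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
 *		__set_pte_at(mm, addr, ptep, pte, 0);
 *		pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
 *	}
 */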

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}
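
/*
 * Worked example with hypothetical constants (the real PTE_RPN_* values
 * come from the subarch headers): if PTE_RPN_SHIFT is 12 and the low 12
 * bits hold flag bits, a PTE value of 0x12345067 yields
 * pte_pfn() == 0x12345.
 */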

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
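
/*
 * Sketch: pte_pfn() and pte_pgprot() are complementary, so a PTE can be
 * split and (assuming no subarch quirks) recombined with pfn_pte():
 *
 *	pte_t copy = pfn_pte(pte_pfn(pte), pte_pgprot(pte));
 *
 * where pte_val(copy) equals pte_val(pte).
 */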

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
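
/*
 * Illustrative use (simplified from the generic fault path in
 * mm/memory.c): a read fault on an anonymous mapping can be serviced by
 * mapping the shared zero page read-only instead of allocating memory:
 *
 *	pte_t zpte = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(addr)),
 *					   vma->vm_page_prot));
 */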

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Translates a vmalloc address to a physical address; also used by KVM */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}
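
/*
 * Typical caller pattern (a sketch, not taken from this header): the
 * generic fault path installs the PTE(s) and then gives the
 * architecture a chance to preload its caches:
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 *	update_mmu_cache_range(NULL, vma, addr, ptep, nr);
 */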

/*
 * When used, PTE_FRAG_NR is defined by the subarch pgtable.h,
 * so it is guaranteed to be defined by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
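
/*
 * Illustrative use (simplified from the pte-fragment allocator in
 * arch/powerpc/mm/pgtable-frag.c): a page-table page is carved into
 * PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes each, and the mm context
 * caches the next free fragment:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (frag)
 *		pte_frag_set(&mm->context, frag + PTE_FRAG_SIZE);
 */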

#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into the
 * details of some of the restrictions. We don't check for PMD_SIZE
 * because our vmemmap allocation code can fall back correctly. The
 * pageblock alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and a 2M PMD_SIZE, we can align things
	 * better with memory block sizes starting from 128MB. Hence
	 * align things with PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}
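
/*
 * Sketch of the consumer side (simplified from mm/memory_hotplug.c's
 * mhp_supports_memmap_on_memory()): the memmap for a hotplugged block
 * may be placed on that block only when the architecture agrees:
 *
 *	unsigned long vmemmap_size = memory_block_memmap_size();
 *
 *	if (!arch_supports_memmap_on_memory(vmemmap_size))
 *		return false;
 */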

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */