xref: /linux/arch/powerpc/include/asm/nohash/32/pgtable.h (revision ebf68996de0ab250c5d520eb2291ab65643e9a1e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
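
/*
 * Illustrative arithmetic only (assuming the common 4KB page size; not
 * used by the code itself): with 32-bit PTEs, PTE_INDEX_SIZE is 10, so
 *
 *	PTRS_PER_PTE   = 1 << 10 = 1024 PTEs per page-table page
 *	PGDIR_SHIFT    = 12 + 10 = 22, i.e. each pgd entry maps 4MB
 *	PGD_INDEX_SIZE = 32 - 22 = 10, i.e. a 1024-entry, one-page pgdir
 *
 * With 64-bit PTEs only 512 entries fit in a page, so PTE_INDEX_SIZE
 * drops to 9, PGDIR_SHIFT to 21, and PGD_INDEX_SIZE grows to 11: the
 * 2048-entry (8KB) pgdir described in the comment above.
 */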

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others; from there we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	FIXADDR_START
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
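
/*
 * Worked example (illustrative; actual addresses depend on the platform
 * and config): with CONFIG_HIGHMEM the area we get to lay out sits
 * below KVIRT_TOP = PKMAP_BASE, otherwise below FIXADDR_START.  On a
 * non-coherent platform with, say, CONFIG_CONSISTENT_SIZE = 0x00200000
 * (a typical value; it is configurable), the consistent DMA pool is
 * carved out of the top of that area and
 *
 *	IOREMAP_TOP = (KVIRT_TOP - 0x00200000) & PAGE_MASK
 *
 * i.e. ioremap space starts 2MB below KVIRT_TOP, page aligned.
 */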

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which grow down
 * from IOREMAP_TOP) and the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up during early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
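
/*
 * Sketch of the arithmetic with a made-up lowmem size: if high_memory
 * is 0xc8000000 (128MB of lowmem mapped at 0xc0000000) and there is no
 * PPC_PIN_SIZE, then
 *
 *	VMALLOC_START = (0xc8000000 + 0x1000000) & ~(0x1000000 - 1)
 *		      = 0xc9000000
 *
 * i.e. the first 16MB boundary above high_memory.  With PPC_PIN_SIZE,
 * high_memory is first aligned up to the pinned TLB entry size before
 * the same rounding is applied.
 */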

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE.  Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (i.e. the PFN is naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
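
/*
 * A minimal sketch (example_pte_pfn is a made-up name, not part of this
 * header) of how the RPN field is used: the physical frame number sits
 * above PTE_RPN_SHIFT, so pfn extraction is a plain shift, and the mask
 * above covers exactly the bits that shift would keep:
 *
 *	static inline unsigned long example_pte_pfn(pte_t pte)
 *	{
 *		return pte_val(pte) >> PTE_RPN_SHIFT;
 *	}
 *
 * The real pte_pfn()/pfn_pte() helpers live elsewhere in the powerpc
 * pgtable headers.
 */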

/*
 * _PAGE_CHG_MASK is the set of bits that are preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
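
/*
 * Illustrative only (example_pte_modify is a made-up name): a
 * pte_modify()-style helper keeps exactly these bits and takes
 * everything else from the new protection:
 *
 *	static inline pte_t example_pte_modify(pte_t pte, pgprot_t prot)
 *	{
 *		return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
 *			     pgprot_val(prot));
 *	}
 */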

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x we maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.  See the usage sketch after these helpers.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	unsigned long new = (old & ~clr) | set;

#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
#else
	*p = __pte(new);
#endif
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
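
/*
 * Usage sketch (illustrative, mirroring the helpers below): callers
 * pass a mask of bits to clear, a mask of bits to set, and get the old
 * PTE value back.  Clearing the referenced bit, for instance:
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *	young = (old & _PAGE_ACCESSED) != 0;
 *
 * while tearing a PTE down completely is pte_update(ptep, ~0, 0), as
 * pte_clear() above and ptep_get_and_clear() below do.
 */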

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

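/*
 * Derive the bits to clear and set from the generic pte_wrprotect()
 * helper itself: whatever pte_wrprotect() removes from an all-ones PTE
 * must be cleared, and whatever it adds to an all-zeroes PTE must be
 * set, so this works whether the platform uses a read-write or a
 * read-only style permission bit.
 */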
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(ptep, clr, set);
}

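/*
 * Same derivation trick as in ptep_set_wrprotect() above, applied to
 * the accessed/dirty/write/exec bits at once: pte_set collects what the
 * pte_mk*() helpers set in an all-zeroes PTE, pte_clr what they leave
 * in an all-ones PTE, so that only the bits actually requested by
 * @entry are changed, for both active-high and active-low (read-only
 * style) permission bits.
 */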
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
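
/*
 * Putting the lookup macros together, an illustrative kernel-address
 * walk (the names and the pr_debug() are only for the example; the
 * pud/pmd levels are folded here, so pud_offset()/pmd_offset() just
 * pass the entry through):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 *	if (pte)
 *		pr_debug("pte for 0x%lx is 0x%lx\n", addr,
 *			 (unsigned long)pte_val(*pte));
 */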

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
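
/*
 * Worked example with made-up numbers: a swap entry of type 2 at
 * offset 0x1234 becomes
 *
 *	swp.val = 2 | (0x1234 << 5)  = 0x00024682
 *
 * and is stored in the PTE shifted up by 3 bits:
 *
 *	pte_val = 0x00024682 << 3    = 0x00123410
 *
 * which keeps the low three PTE bits clear (where _PAGE_PRESENT lives
 * on these platforms); __pte_to_swp_entry(), __swp_type() and
 * __swp_offset() simply undo the shifts.
 */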

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */