/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
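
/*
 * Worked example for the common case above (illustrative only, assuming 4k
 * pages, i.e. PAGE_SHIFT = 12, and 32-bit PTEs, i.e. PTE_INDEX_SIZE = 10):
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22, so each pgdir entry maps 4MB
 *	PTRS_PER_PTE = 1 << 10 = 1024 PTEs in a one-page PTE table
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries in a one-page pgdir
 */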

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on other configurations, from where we can start
 * laying out the kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_END	ioremap_bot
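
/*
 * Rough sketch of the resulting kernel virtual layout (illustrative only,
 * exact addresses depend on the configuration):
 *
 *	KVIRT_TOP			PKMAP_BASE with HIGHMEM, else 0xfe000000
 *	IOREMAP_TOP			KVIRT_TOP, lowered by CONFIG_CONSISTENT_SIZE
 *					on non-coherent platforms
 *	ioremap_bot = VMALLOC_END	moves down from IOREMAP_TOP as early
 *					ioremaps are allocated
 *	VMALLOC_START			high_memory rounded up to VMALLOC_OFFSET
 *	PAGE_OFFSET .. high_memory	linear (lowmem) mapping of RAM
 */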

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
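
/*
 * Typical (illustrative) uses of pte_update(): clear and/or set status bits
 * in a live PTE while preserving everything else, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);	clears the referenced bit
 *	pte_update(ptep, 0, _PAGE_DIRTY);		sets a bit, keeps the rest
 *
 * The lwarx/stwcx. loop above retries until the store succeeds, so only the
 * requested bits change even if the hash code updates the PTE concurrently.
 */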

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
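
/*
 * The hash flush above matters because, as long as a hash PTE exists for
 * the page, accesses are satisfied from the hash table without faulting,
 * so the Linux PTE would never see _PAGE_ACCESSED set again.  (A sketch of
 * the rationale, not a formal description of the hash MMU behaviour.)
 */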

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
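
/*
 * Illustrative example of the encoding above: the arch-independent swap
 * entry keeps the 5-bit type in its low bits and the offset above them,
 * and the PTE holds that value shifted left by 3 so that the low PTE bits
 * (the ones the comment above says must stay clear) are zero.
 * E.g. type 2, offset 0x100:
 *
 *	__swp_entry(2, 0x100).val			== 0x2002
 *	pte_val(__swp_entry_to_pte(__swp_entry(2, 0x100)))	== 0x10010
 */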

int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * We only find page table entries in the last level,
 * hence there is no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	unsigned long pteval = pte_val(pte);
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE
	 */
	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_WRITE;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
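
/*
 * Illustrative only: assuming the usual 32-bit case where PTE_RPN_SHIFT is
 * PAGE_SHIFT, pfn_pte(0x12345, prot) builds a PTE whose value is
 * 0x12345000 | pgprot_val(prot), and pte_pfn() shifts that back down to
 * recover 0x12345.
 */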

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}



/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported "
#endif
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
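
/*
 * A typical (illustrative) use of these helpers is a driver mmap()
 * implementation mapping MMIO registers into user space:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * which ends up setting _PAGE_NO_CACHE | _PAGE_GUARDED in every PTE of
 * the mapping.
 */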

#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */