/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K
 * pages on PPC44x and 4K/16K on 8xx). For PPC64 we support either a 4K
 * or 64K software page size. When using 64K pages, however, whether we
 * really support 64K pages in HW is irrelevant to these definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
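
/*
 * Illustrative values (an assumption, for a typical configuration with
 * CONFIG_PPC_PAGE_SHIFT = 12):
 *
 *	PAGE_SHIFT = 12
 *	PAGE_SIZE  = ASM_CONST(1) << 12 = 0x1000 (4096 bytes)
 */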

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
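
/*
 * Worked example (a sketch, assuming a 4K base page size on 8xx, where
 * HPAGE_SHIFT = 19):
 *
 *	HPAGE_SIZE         = 1UL << 19 = 0x80000 (512K)
 *	HPAGE_MASK         = ~(0x80000 - 1) = 0xfff80000
 *	HUGETLB_PAGE_ORDER = 19 - 12 = 7
 */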

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
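
/*
 * Example of the sign extension above (a sketch, assuming PAGE_SHIFT = 12):
 *
 *	(1 << 12) - 1 = 0x00000fff			(int)
 *	PAGE_MASK     = ~0x00000fff = 0xfffff000	(int, negative)
 *
 * Assigned to a 64-bit unsigned long, the value sign-extends to
 * 0xfffffffffffff000, i.e. 1s in the high bits as intended.
 */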

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's
 * often the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two equivalent ways to relate a virtual address to its
 * physical address in the linear mapping:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
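
/*
 * Worked example (illustrative, assuming the common static ppc32 layout
 * where KERNELBASE = PAGE_OFFSET = 0xc0000000 and PHYSICAL_START =
 * MEMORY_START = 0):
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START = pa + 0xc0000000
 *
 * so physical 0x01000000 (16M) maps to virtual 0xc1000000, and the
 * linear-mapping equation KERNELBASE - PAGE_OFFSET =
 * PHYSICAL_START - MEMORY_START holds trivially (0 = 0).
 */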

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif
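
/*
 * Example (illustrative): with MEMORY_START = 0x01000000 (16M) and 4K
 * pages, ARCH_PFN_OFFSET = 0x1000, so pfn_valid() accepts pfns in
 * [0x1000, max_mapnr).
 */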

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
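
/*
 * Round-trip sketch (illustrative, assuming PAGE_SHIFT = 12 and a lowmem
 * address kaddr with __pa(kaddr) = 0x01234000):
 *
 *	virt_to_pfn(kaddr)   = 0x01234000 >> 12 = 0x1234
 *	pfn_to_kaddr(0x1234) = __va(0x1234 << 12) = kaddr
 *	virt_to_page(kaddr)  = pfn_to_page(0x1234)
 */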

#define virt_addr_valid(vaddr)	({					\
	unsigned long _addr = (unsigned long)vaddr;			\
	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
	pfn_valid(virt_to_pfn(_addr));					\
})

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *	  virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(_stext.run, 256M)
 *
 *   and create the following mapping:
 *
 *	  ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *	   __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   where:
 *	 PHYSICAL_START = kernstart_addr = physical address of _stext
 *	 KERNELBASE = compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M.
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *	  where:
 *		PHYSICAL_START = dynamic load address (the kernstart_addr
 *				 variable)
 *		Effective KERNELBASE = virtual_base
 *				     = ALIGN_DOWN(KERNELBASE, 256M) +
 *						MODULO(PHYSICAL_START, 256M)
 *
 *	To make __va() / __pa() cheaper, we introduce a new variable
 *	virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *				ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 *	Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 *		and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so use
 * the other definitions for __va & __pa.
 */
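
/*
 * Worked numbers for the 64M example above (a sketch following the
 * formulas in this comment):
 *
 *	Effective KERNELBASE = ALIGN_DOWN(0xc0000000, 256M)
 *				+ MODULO(0x04000000, 256M)
 *			     = 0xc0000000 + 0x04000000 = 0xc4000000
 *
 *	virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M)
 *				- ALIGN_DOWN(0x04000000, 256M)
 *			 = 0xc0000000 - 0 = 0xc0000000
 *
 *	__va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000, as required.
 */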
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
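
/*
 * Sketch of the bitwise trick above (illustrative, assuming the usual
 * 64-bit PAGE_OFFSET of 0xc000000000000000, so the top nibble selects
 * kernel space and the low 60 bits carry the address):
 *
 *	__va(0x0000000001234000) = 0x0000000001234000 | PAGE_OFFSET
 *				 = 0xc000000001234000
 *	__pa(0xc000000001234000) = 0xc000000001234000 & 0x0fffffffffffffffUL
 *				 = 0x0000000001234000
 *
 * i.e. | and & produce the same results + and - would for linear-map
 * addresses, without the subtraction gcc miscompiles.
 */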

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK     0xfff
#else
#define HUGEPD_SHIFT_MASK     0x3f
#endif

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}
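
/*
 * Example (illustrative): with KERNELBASE = 0xc0000000 and the kernel
 * relocated to kernstart_virt_addr = 0xc0a00000, kaslr_offset() returns
 * 0x00a00000; it is 0 when no KASLR shift was applied.
 */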

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */