/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>
#include <asm/asm.h>

#define _PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK	(~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	_PAGE_SHIFT
#define PAGE_SIZE	_PAGE_SIZE
#define PAGE_MASK	_PAGE_MASK
#define PAGE_DEFAULT_ACC	_AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC		9
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)
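
/*
 * The access-control bits occupy the high nibble of the storage key byte
 * (see _PAGE_ACC_BITS below), hence the default access value is shifted
 * left by four; with PAGE_DEFAULT_ACC == 0 the default key is simply 0.
 * PAGE_SPO_ACC is the access-control value (9) recognized by the
 * storage-protection-override facility.
 */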

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2
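
/*
 * Huge pages are segment sized, i.e. 1 MiB. With 4 KiB base pages this
 * makes HUGETLB_PAGE_ORDER = HPAGE_SHIFT - PAGE_SHIFT = 20 - 12 = 8,
 * i.e. 256 base pages per huge page; HUGE_MAX_HSTATE of 2 leaves room
 * for an additional hstate for 2 GiB (region third level) pages.
 */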

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

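/*
 * Storage keys only need to be initialized when a non-zero default key
 * is configured; with PAGE_DEFAULT_KEY == 0 storage_key_init_range()
 * becomes a no-op, which avoids walking every page at boot.
 */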
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with a 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages,
 * this keeps L1 and L2 data caches alive.
 */
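/*
 * The odd register of each pair holds the 24-bit operand length
 * (0x1000 == one 4K page); the source pair additionally carries the 0xb0
 * padding byte in the byte above the length, as expected by mvcl.
 */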
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
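
/*
 * The generic pgd/p4d/pud/pmd/pte levels correspond to the s390
 * region-first, region-second, region-third, segment and page tables;
 * unused upper levels are folded at runtime depending on the size of
 * the address space.
 */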

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)

static inline unsigned long pte_val(pte_t pte)
{
	return pte.pte;
}

static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline unsigned long pud_val(pud_t pud)
{
	return pud.pud;
}

static inline unsigned long p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}

static inline unsigned long pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

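/*
 * Set the storage key for the 4K block at @addr. For blocks that are not
 * mapped the rrf form of sske (opcode 0xb22b) is used with the
 * nonquiescing bit in the m3 field, presumably because no TLB entries
 * can exist for an unmapped page and quiescing can be skipped.
 */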
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

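/*
 * Reset the referenced bit in the storage key of the 4K block at @addr
 * and return the resulting condition code, which reflects the state of
 * the referenced and changed bits before the reset.
 */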
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%[addr]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
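
/*
 * The storage key proper is 7 bits: four access-control bits, the fetch
 * protection bit and the hardware referenced and changed bits; bit 0x01
 * is not part of the key.
 */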

struct page;
struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE

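/*
 * Virtual memory layout as set up early during boot: kaslr_offset and
 * kaslr_offset_phys are the virtual and physical base of the (possibly
 * randomized) kernel image, identity_base and identity_size describe the
 * identity (direct) mapping of physical memory.
 */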
struct vm_layout {
	unsigned long kaslr_offset;
	unsigned long kaslr_offset_phys;
	unsigned long identity_base;
	unsigned long identity_size;
};

extern struct vm_layout vm_layout;

#define __kaslr_offset		vm_layout.kaslr_offset
#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
#define __identity_base		vm_layout.identity_base
#define ident_map_size		vm_layout.identity_size

static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return __kaslr_enabled;
	return 0;
}

#define __PAGE_OFFSET		__identity_base
#define PAGE_OFFSET		__PAGE_OFFSET

#ifdef __DECOMPRESSOR

#define __pa_nodebug(x)		((unsigned long)(x))
#define __pa(x)			__pa_nodebug(x)
#define __pa32(x)		__pa(x)
#define __va(x)			((void *)(unsigned long)(x))

#else /* __DECOMPRESSOR */

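/*
 * Virtual addresses below __kaslr_offset lie in the identity mapping and
 * translate by subtracting __identity_base; addresses at or above
 * __kaslr_offset belong to the kernel image and translate via the image's
 * physical load address (__kaslr_offset_phys).
 */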
static inline unsigned long __pa_nodebug(unsigned long x)
{
	if (x < __kaslr_offset)
		return x - __identity_base;
	return x - __kaslr_offset + __kaslr_offset_phys;
}

#ifdef CONFIG_DEBUG_VIRTUAL

unsigned long __phys_addr(unsigned long x, bool is_31bit);

#else /* CONFIG_DEBUG_VIRTUAL */

static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
{
	return __pa_nodebug(x);
}

#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa(x)			__phys_addr((unsigned long)(x), false)
#define __pa32(x)		__phys_addr((unsigned long)(x), true)
#define __va(x)			((void *)((unsigned long)(x) + __identity_base))

#endif /* __DECOMPRESSOR */

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define phys_to_folio(phys)	page_folio(phys_to_page(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))
#define folio_to_phys(folio)	pfn_to_phys(folio_pfn(folio))

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define AMODE31_SIZE		(3 * PAGE_SIZE)

#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)

#define TEXT_OFFSET		0x100000

#endif /* _S390_PAGE_H */