xref: /linux/arch/s390/include/asm/page.h (revision 6aacab308a5dfd222b2d23662bbae60c11007cfb)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  */
7 
8 #ifndef _S390_PAGE_H
9 #define _S390_PAGE_H
10 
11 #include <linux/const.h>
12 #include <asm/types.h>
13 #include <asm/asm.h>
14 
15 #include <vdso/page.h>
16 
/* Default access-control value for storage keys (0 = no restriction). */
#define PAGE_DEFAULT_ACC	_AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC		9
/* Access-control bits live in bits 4-7 of a storage key. */
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

/* Huge pages are 1 MiB (2^20 bytes). */
#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
/* Two hugetlb size classes supported (presumably 1 MiB and 2 GiB — confirm). */
#define HUGE_MAX_HSTATE		2

/* Feature flags: this arch supplies its own huge-PTE helpers. */
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
34 
35 #include <asm/setup.h>
36 #ifndef __ASSEMBLER__
37 
38 void __storage_key_init_range(unsigned long start, unsigned long end);
39 
40 static inline void storage_key_init_range(unsigned long start, unsigned long end)
41 {
42 	if (PAGE_DEFAULT_KEY != 0)
43 		__storage_key_init_range(start, end);
44 }
45 
/* Zero a whole page; a plain memset() suffices here. */
#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;	/* destination length: one 4K page */
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;	/* length 0x1000 plus the 0xb0 pad byte (see above) */

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}
67 
/* No cache aliasing on this arch: a user-page copy is just a page copy. */
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/* Allocate an already-zeroed, movable folio for anonymous user mappings. */
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
72 
#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define STRICT_MM_TYPECHECKS
#endif

/*
 * With STRICT_MM_TYPECHECKS the page table entry types are distinct
 * single-member structs, so mixing e.g. a pte_t and a pmd_t is a compile
 * error.  Without it they are plain unsigned longs (cheaper, no checking).
 * DEFINE_PGVAL_FUNC generates the matching <name>_val() accessor for
 * whichever representation is active.
 */
#ifdef STRICT_MM_TYPECHECKS

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;

/* Accessor returns the wrapped raw value of the struct-typed entry. */
#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name.name;						\
}

#else /* STRICT_MM_TYPECHECKS */

typedef unsigned long pgprot_t;
typedef unsigned long pgste_t;
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long p4d_t;
typedef unsigned long pgd_t;

/* Accessor is the identity function on the raw unsigned long. */
#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name;							\
}

#endif /* STRICT_MM_TYPECHECKS */

DEFINE_PGVAL_FUNC(pgprot)
DEFINE_PGVAL_FUNC(pgste)
DEFINE_PGVAL_FUNC(pte)
DEFINE_PGVAL_FUNC(pmd)
DEFINE_PGVAL_FUNC(pud)
DEFINE_PGVAL_FUNC(p4d)
DEFINE_PGVAL_FUNC(pgd)

typedef pte_t *pgtable_t;

/* Constructors: wrap a raw value in the (possibly struct) entry type. */
#define __pgprot(x)	((pgprot_t) { (x) } )
#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
128 
/*
 * Set the storage key of the 4K block at physical address @addr to @skey.
 *
 * @mapped: when non-zero the plain SSKE instruction is used; when zero the
 *	    .insn-encoded variant of SSKE (opcode 0xb22b) with m3=8 is used —
 *	    presumably the nonquiescing form; confirm against the z/Architecture
 *	    Principles of Operation.
 */
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped) {
		asm volatile(
			"	.insn	rrf,0xb22b0000,%[skey],%[addr],8,0"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	} else {
		asm volatile(
			"	sske	 %[skey],%[addr]"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	}
}
146 
/* Read the storage key of the block at physical address @addr (ISKE). */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}
154 
/*
 * Reset the referenced bit in the storage key of the block at @addr (RRBE)
 * and return the instruction's condition code via the CC_* helpers —
 * presumably encoding the prior referenced/changed state; confirm against
 * the z/Architecture Principles of Operation.
 */
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%[addr]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}
167 
168 int split_pud_page(pud_t *pudp, unsigned long addr);
169 
/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
175 
struct page;
struct folio;
/* Arch hooks run on page free/alloc (enabled by HAVE_ARCH_*_PAGE below). */
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

/* /dev/mem access to physical memory is never permitted on this arch. */
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
191 
/*
 * Runtime virtual memory layout of the kernel.
 * @kaslr_offset:      virtual address the kernel image was relocated to
 * @kaslr_offset_phys: physical address of the relocated kernel image
 * @identity_base:     virtual start of the identity map of physical memory
 * @identity_size:     size of the identity map
 */
struct vm_layout {
	unsigned long kaslr_offset;
	unsigned long kaslr_offset_phys;
	unsigned long identity_base;
	unsigned long identity_size;
};

extern struct vm_layout vm_layout;

#define __kaslr_offset		vm_layout.kaslr_offset
#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
/* Identity base is only variable when its randomization is configured in. */
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
#define __identity_base		vm_layout.identity_base
#else
#define __identity_base		0UL
#endif
#define ident_map_size		vm_layout.identity_size
209 
/* Return the kernel image's virtual KASLR offset. */
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}
214 
215 extern int __kaslr_enabled;
216 static inline int kaslr_enabled(void)
217 {
218 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
219 		return __kaslr_enabled;
220 	return 0;
221 }
222 
223 #define __PAGE_OFFSET		__identity_base
224 #define PAGE_OFFSET		__PAGE_OFFSET
225 
#ifdef __DECOMPRESSOR

/* The decompressor runs identity mapped: virtual == physical. */
#define __pa_nodebug(x)		((unsigned long)(x))
#define __pa(x)			__pa_nodebug(x)
#define __pa32(x)		__pa(x)
#define __va(x)			((void *)(unsigned long)(x))
232 
233 #else /* __DECOMPRESSOR */
234 
235 static inline unsigned long __pa_nodebug(unsigned long x)
236 {
237 	if (x < __kaslr_offset)
238 		return x - __identity_base;
239 	return x - __kaslr_offset + __kaslr_offset_phys;
240 }
241 
#ifdef CONFIG_DEBUG_VIRTUAL

/* Checked virt-to-phys translation, implemented out of line. */
unsigned long __phys_addr(unsigned long x, bool is_31bit);

#else /* CONFIG_DEBUG_VIRTUAL */

/* Without debug checking, translation falls through to __pa_nodebug(). */
static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
{
	return __pa_nodebug(x);
}

#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa(x)			__phys_addr((unsigned long)(x), false)
#define __pa32(x)		__phys_addr((unsigned long)(x), true)
#define __va(x)			((void *)((unsigned long)(x) + __identity_base))
258 
259 #endif /* __DECOMPRESSOR */
260 
#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_folio(phys)	page_folio(phys_to_page(phys))
/*
 * Fix: the parameter was named "page" while the expansion referenced
 * "folio", so the macro only compiled when the caller's argument happened
 * to be an expression literally spelled "folio".  Name the parameter
 * after what it actually expands.
 */
#define folio_to_phys(folio)	pfn_to_phys(folio_pfn(folio))
266 
/* Map a page frame number to its kernel virtual address. */
static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

/* Map a kernel virtual address to its page frame number. */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}
276 
277 #define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)
278 
279 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
280 #define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))
281 
282 #define virt_addr_valid(kaddr)	pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
283 
284 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC
285 
286 #endif /* !__ASSEMBLER__ */
287 
288 #include <asm-generic/memory_model.h>
289 #include <asm-generic/getorder.h>
290 
/* Size reserved for the AMODE31 section: three pages. */
#define AMODE31_SIZE		(3 * PAGE_SIZE)

/* Maximum kernel image size: 512 MiB. */
#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)

/* Offset of the kernel text within the image (1 MiB) — presumably leaving
 * room for the lowcore/boot data below it; confirm against the boot code. */
#define TEXT_OFFSET		0x100000
298 
299 #endif /* _S390_PAGE_H */
300