/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>
#include <asm/asm.h>

#include <vdso/page.h>

#define PAGE_DEFAULT_ACC	_AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC		9
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

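/*
 * Huge pages: HPAGE_* describes the 1 MB huge page size backed by segment
 * table entries; HUGE_MAX_HSTATE leaves room for the additional 2 GB size.
 */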
#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLER__

void __storage_key_init_range(unsigned long start, unsigned long end);

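/*
 * Initialize the storage keys of the given physical address range.
 * Nothing to do when the default storage key is zero.
 */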
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;	/* destination length: one 4K page */
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;	/* 0xb0 padding byte + source length of one 4K page */

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define STRICT_MM_TYPECHECKS
#endif

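/*
 * With STRICT_MM_TYPECHECKS the page table entry types are distinct
 * structures, so mixing up e.g. pte_t and pmd_t values is a compile time
 * error; otherwise they are plain unsigned longs. DEFINE_PGVAL_FUNC
 * generates the matching *_val() accessor for either representation.
 */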
#ifdef STRICT_MM_TYPECHECKS

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;

#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name.name;						\
}

#else /* STRICT_MM_TYPECHECKS */

typedef unsigned long pgprot_t;
typedef unsigned long pgste_t;
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long p4d_t;
typedef unsigned long pgd_t;

#define DEFINE_PGVAL_FUNC(name)						\
static __always_inline unsigned long name ## _val(name ## _t name)	\
{									\
	return name;							\
}

#endif /* STRICT_MM_TYPECHECKS */

DEFINE_PGVAL_FUNC(pgprot)
DEFINE_PGVAL_FUNC(pgste)
DEFINE_PGVAL_FUNC(pte)
DEFINE_PGVAL_FUNC(pmd)
DEFINE_PGVAL_FUNC(pud)
DEFINE_PGVAL_FUNC(p4d)
DEFINE_PGVAL_FUNC(pgd)

typedef pte_t *pgtable_t;

#define __pgprot(x)	((pgprot_t) { (x) } )
#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )

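/*
 * Set the storage key of the 4K page at @addr. For pages that are not
 * mapped the nonquiescing form of SSKE (opcode 0xb22b with the NQ control
 * in the M3 field) is used; for mapped pages a normal SSKE is issued.
 */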
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped) {
		asm volatile(
			"	.insn	rrf,0xb22b0000,%[skey],%[addr],8,0"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	} else {
		asm volatile(
			"	sske	 %[skey],%[addr]"
			:
			: [skey] "d" (skey), [addr] "a" (addr)
			: "memory");
	}
}

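/* Read the storage key of the 4K page at @addr (ISKE). */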
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

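/*
 * Reset the referenced bit in the storage key of the page at @addr (RRBE)
 * and return the condition code, which reflects the previous state of the
 * reference and change bits.
 */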
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%[addr]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/

struct page;
struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

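/* Access to physical memory via /dev/mem is not allowed on s390. */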
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE

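/*
 * Virtual memory layout as established during early boot: virtual and
 * physical KASLR offsets of the kernel image plus base and size of the
 * identity mapping of physical memory.
 */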
struct vm_layout {
	unsigned long kaslr_offset;
	unsigned long kaslr_offset_phys;
	unsigned long identity_base;
	unsigned long identity_size;
};

extern struct vm_layout vm_layout;

#define __kaslr_offset		vm_layout.kaslr_offset
#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
#define __identity_base		vm_layout.identity_base
#else
#define __identity_base		0UL
#endif
#define ident_map_size		vm_layout.identity_size

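/*
 * kaslr_offset() reports the offset applied to the kernel image;
 * kaslr_enabled() tells whether base address randomization is active.
 */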
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return __kaslr_enabled;
	return 0;
}

#define __PAGE_OFFSET		__identity_base
#define PAGE_OFFSET		__PAGE_OFFSET

#ifdef __DECOMPRESSOR

#define __pa_nodebug(x)		((unsigned long)(x))
#define __pa(x)			__pa_nodebug(x)
#define __pa32(x)		__pa(x)
#define __va(x)			((void *)(unsigned long)(x))

#else /* __DECOMPRESSOR */

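/*
 * Convert a kernel virtual address to a physical address: addresses below
 * the kernel image are part of the identity mapping, addresses at or above
 * it belong to the (possibly randomized) kernel image mapping.
 */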
static inline unsigned long __pa_nodebug(unsigned long x)
{
	if (x < __kaslr_offset)
		return x - __identity_base;
	return x - __kaslr_offset + __kaslr_offset_phys;
}

#ifdef CONFIG_DEBUG_VIRTUAL

unsigned long __phys_addr(unsigned long x, bool is_31bit);

#else /* CONFIG_DEBUG_VIRTUAL */

static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
{
	return __pa_nodebug(x);
}

#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa(x)			__phys_addr((unsigned long)(x), false)
#define __pa32(x)		__phys_addr((unsigned long)(x), true)
#define __va(x)			((void *)((unsigned long)(x) + __identity_base))

#endif /* __DECOMPRESSOR */

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_folio(phys)	page_folio(phys_to_page(phys))
#define folio_to_phys(folio)	pfn_to_phys(folio_pfn(folio))

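/* Conversions between page frame numbers and kernel virtual addresses. */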
static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#endif /* !__ASSEMBLER__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define AMODE31_SIZE		(3 * PAGE_SIZE)

#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)

#define TEXT_OFFSET		0x100000

#endif /* _S390_PAGE_H */