xref: /linux/arch/s390/include/asm/pgtable.h (revision 2a4c0c11c0193889446cdb6f1540cc2b9aff97dd)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  *               Ulrich Weigand (weigand@de.ibm.com)
7  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
8  *
9  *  Derived from "include/asm-i386/pgtable.h"
10  */
11 
12 #ifndef _ASM_S390_PGTABLE_H
13 #define _ASM_S390_PGTABLE_H
14 
15 #include <linux/sched.h>
16 #include <linux/mm_types.h>
17 #include <linux/cpufeature.h>
18 #include <linux/page-flags.h>
19 #include <linux/page_table_check.h>
20 #include <linux/radix-tree.h>
21 #include <linux/atomic.h>
22 #include <linux/mmap_lock.h>
23 #include <asm/ctlreg.h>
24 #include <asm/bug.h>
25 #include <asm/page.h>
26 #include <asm/uv.h>
27 
28 extern pgd_t swapper_pg_dir[];
29 extern pgd_t invalid_pg_dir[];
30 extern void paging_init(void);
31 extern struct ctlreg s390_invalid_asce;
32 
33 enum {
34 	PG_DIRECT_MAP_4K = 0,
35 	PG_DIRECT_MAP_1M,
36 	PG_DIRECT_MAP_2G,
37 	PG_DIRECT_MAP_MAX
38 };
39 
40 extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
41 
42 static inline void update_page_count(int level, long count)
43 {
44 	if (IS_ENABLED(CONFIG_PROC_FS))
45 		atomic_long_add(count, &direct_pages_count[level]);
46 }
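/*
 * Illustrative sketch (not part of the original header): a caller that
 * splits a 1M direct mapping into 4K pages would keep the counters
 * consistent roughly like this (one 1M segment equals 256 4K pages):
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 *
 * The counters are only used for reporting, hence the CONFIG_PROC_FS guard.
 */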
47 
48 /*
49  * The S390 doesn't have any external MMU info: the kernel page
50  * tables contain all the necessary information.
51  */
52 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
53 #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
54 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
55 
56 /*
57  * ZERO_PAGE is a global shared page that is always zero; used
58  * for zero-mapped memory areas etc..
59  */
60 
61 extern unsigned long empty_zero_page;
62 extern unsigned long zero_page_mask;
63 
64 #define ZERO_PAGE(vaddr) \
65 	(virt_to_page((void *)(empty_zero_page + \
66 	 (((unsigned long)(vaddr)) & zero_page_mask))))
67 #define __HAVE_COLOR_ZERO_PAGE
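/*
 * Expansion sketch of ZERO_PAGE(): assuming several zero pages are
 * allocated back to back and covered by zero_page_mask, the virtual
 * address of the mapping selects one of them, so that reads through
 * different user addresses hit different cache colors:
 *
 *	unsigned long off = (unsigned long)(vaddr) & zero_page_mask;
 *	struct page *zp = virt_to_page((void *)(empty_zero_page + off));
 */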
68 
69 /* TODO: s390 cannot support io_remap_pfn_range... */
70 
71 #define pte_ERROR(e) \
72 	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
73 #define pmd_ERROR(e) \
74 	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
75 #define pud_ERROR(e) \
76 	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
77 #define p4d_ERROR(e) \
78 	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
79 #define pgd_ERROR(e) \
80 	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
81 
82 /*
83  * The vmalloc and module area will always be on the topmost area of the
84  * kernel mapping. 512GB are reserved for vmalloc by default.
85  * At the top of the vmalloc area a 2GB area is reserved where modules
86  * will reside. That makes sure that inter-module branches always
87  * happen without trampolines and, in addition, the placement within a
88  * 2GB frame is friendly to the branch prediction unit.
89  */
90 extern unsigned long VMALLOC_START;
91 extern unsigned long VMALLOC_END;
92 #define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
93 extern struct page *vmemmap;
94 extern unsigned long vmemmap_size;
95 
96 extern unsigned long MODULES_VADDR;
97 extern unsigned long MODULES_END;
98 #define MODULES_VADDR	MODULES_VADDR
99 #define MODULES_END	MODULES_END
100 #define MODULES_LEN	(1UL << 31)
101 
102 static inline int is_module_addr(void *addr)
103 {
104 	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
105 	if (addr < (void *)MODULES_VADDR)
106 		return 0;
107 	if (addr > (void *)MODULES_END)
108 		return 0;
109 	return 1;
110 }
111 
112 #ifdef CONFIG_KMSAN
113 #define KMSAN_VMALLOC_SIZE (VMALLOC_END - VMALLOC_START)
114 #define KMSAN_VMALLOC_SHADOW_START VMALLOC_END
115 #define KMSAN_VMALLOC_SHADOW_END (KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE)
116 #define KMSAN_VMALLOC_ORIGIN_START KMSAN_VMALLOC_SHADOW_END
117 #define KMSAN_VMALLOC_ORIGIN_END (KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE)
118 #define KMSAN_MODULES_SHADOW_START KMSAN_VMALLOC_ORIGIN_END
119 #define KMSAN_MODULES_SHADOW_END (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
120 #define KMSAN_MODULES_ORIGIN_START KMSAN_MODULES_SHADOW_END
121 #define KMSAN_MODULES_ORIGIN_END (KMSAN_MODULES_ORIGIN_START + MODULES_LEN)
122 #endif
123 
124 #ifdef CONFIG_RANDOMIZE_BASE
125 #define KASLR_LEN	(1UL << 31)
126 #else
127 #define KASLR_LEN	0UL
128 #endif
129 
130 void setup_protection_map(void);
131 
132 /*
133  * A 64 bit page table entry of S390 has the following format:
134  * |			 PFRA			      |0IPC|  OS  |
135  * 0000000000111111111122222222223333333333444444444455555555556666
136  * 0123456789012345678901234567890123456789012345678901234567890123
137  *
138  * I Page-Invalid Bit:    Page is not available for address-translation
139  * P Page-Protection Bit: Store access not possible for page
140  * C Change-bit override: HW is not required to set change bit
141  *
142  * A 64 bit segment table entry of S390 has the following format:
143  * |        P-table origin                              |      TT
144  * 0000000000111111111122222222223333333333444444444455555555556666
145  * 0123456789012345678901234567890123456789012345678901234567890123
146  *
147  * I Segment-Invalid Bit:    Segment is not available for address-translation
148  * C Common-Segment Bit:     Segment is not private (PoP 3-30)
149  * P Page-Protection Bit: Store access not possible for page
150  * TT Type 00
151  *
152  * A 64 bit region table entry of S390 has the following format:
153  * |        S-table origin                             |   TF  TTTL
154  * 0000000000111111111122222222223333333333444444444455555555556666
155  * 0123456789012345678901234567890123456789012345678901234567890123
156  *
157  * I Segment-Invalid Bit:    Segment is not available for address-translation
158  * TT Type 01
159  * TF
160  * TL Table length
161  *
162  * The 64 bit region table origin of S390 has the following format:
163  * |      region table origin                          |       DTTL
164  * 0000000000111111111122222222223333333333444444444455555555556666
165  * 0123456789012345678901234567890123456789012345678901234567890123
166  *
167  * X Space-Switch event:
168  * G Segment-Invalid Bit:
169  * P Private-Space Bit:
170  * S Storage-Alteration:
171  * R Real space
172  * TL Table-Length:
173  *
174  * A storage key has the following format:
175  * | ACC |F|R|C|0|
176  *  0   3 4 5 6 7
177  * ACC: access key
178  * F  : fetch protection bit
179  * R  : referenced bit
180  * C  : changed bit
181  */
182 
183 /* Hardware bits in the page table entry */
184 #define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
185 #define _PAGE_PROTECT	0x200		/* HW read-only bit  */
186 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
187 #define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
188 
189 /* Software bits in the page table entry */
190 #define _PAGE_PRESENT	0x001		/* SW pte present bit */
191 #define _PAGE_YOUNG	0x004		/* SW pte young bit */
192 #define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
193 #define _PAGE_READ	0x010		/* SW pte read bit */
194 #define _PAGE_WRITE	0x020		/* SW pte write bit */
195 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
196 #define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
197 
198 #ifdef CONFIG_MEM_SOFT_DIRTY
199 #define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
200 #else
201 #define _PAGE_SOFT_DIRTY 0x000
202 #endif
203 
204 #define _PAGE_SW_BITS	0xffUL		/* All SW bits */
205 
206 #define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */
207 
208 /* Set of bits not changed in pte_modify */
209 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
210 				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
211 
212 /*
213  * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
214  * HW bit and all SW bits.
215  */
216 #define _PAGE_RDP_MASK		~(_PAGE_PROTECT | _PAGE_SW_BITS)
217 
218 /*
219  * handle_pte_fault uses pte_present and pte_none to find out the pte type
220  * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
221  * distinguish present from not-present ptes. It is changed only with the page
222  * table lock held.
223  *
224  * The following table gives the different possible bit combinations for
225  * the pte hardware and software bits in the last 12 bits of a pte
226  * (. unassigned bit, x don't care, t swap type):
227  *
228  *				842100000000
229  *				000084210000
230  *				000000008421
231  *				.IR.uswrdy.p
232  * empty			.10.00000000
233  * swap				.11..ttttt.0
234  * prot-none, clean, old	.11.xx0000.1
235  * prot-none, clean, young	.11.xx0001.1
236  * prot-none, dirty, old	.11.xx0010.1
237  * prot-none, dirty, young	.11.xx0011.1
238  * read-only, clean, old	.11.xx0100.1
239  * read-only, clean, young	.01.xx0101.1
240  * read-only, dirty, old	.11.xx0110.1
241  * read-only, dirty, young	.01.xx0111.1
242  * read-write, clean, old	.11.xx1100.1
243  * read-write, clean, young	.01.xx1101.1
244  * read-write, dirty, old	.10.xx1110.1
245  * read-write, dirty, young	.00.xx1111.1
246  * HW-bits: R read-only, I invalid
247  * SW-bits: p present, y young, d dirty, r read, w write, s special,
248  *	    u unused, l large
249  *
250  * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
251  * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
252  * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
253  */
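/*
 * Worked example for the table above: a prot-none, clean, old pte
 * (.11.xx0000.1) has _PAGE_INVALID (0x400), _PAGE_PROTECT (0x200) and
 * _PAGE_PRESENT (0x001) set, i.e. its low bits are 0x601. Then:
 *
 *	pte_present(__pte(0x601))	-> true,  0x601 & 0x001 != 0
 *	pte_none(__pte(0x601))		-> false, 0x601 != 0x400
 *	pte_swap(__pte(0x601))		-> false, (0x601 & 0x201) != 0x200
 */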
254 
255 /* Bits in the segment/region table address-space-control-element */
256 #define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
257 #define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
258 #define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
259 #define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
260 #define _ASCE_REAL_SPACE	0x20	/* real space control		    */
261 #define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
262 #define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
263 #define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
264 #define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
265 #define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
266 #define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
267 
268 /* Bits in the region table entry */
269 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
270 #define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
271 #define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
272 #define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
273 #define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
274 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
275 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
276 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
277 #define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
278 #define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */
279 
280 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
281 #define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
282 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
283 #define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
284 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \
285 				 _REGION3_ENTRY_PRESENT)
286 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
287 
288 #define _REGION3_ENTRY_HARDWARE_BITS		0xfffffffffffff6ffUL
289 #define _REGION3_ENTRY_HARDWARE_BITS_LARGE	0xffffffff8001073cUL
290 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
291 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
292 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
293 #define _REGION3_ENTRY_COMM	0x0010	/* Common-Region, marks swap entry */
294 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
295 #define _REGION3_ENTRY_WRITE	0x8000	/* SW region write bit */
296 #define _REGION3_ENTRY_READ	0x4000	/* SW region read bit */
297 
298 #ifdef CONFIG_MEM_SOFT_DIRTY
299 #define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */
300 #else
301 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
302 #endif
303 
304 #define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
305 
306 /*
307  * SW region present bit. For non-leaf region-third-table entries, bits 62-63
308  * indicate the TABLE LENGTH and both must be set to 1. But such entries
309  * would always be considered as present, so it is safe to use bit 63 as
310  * PRESENT bit for PUD.
311  */
312 #define _REGION3_ENTRY_PRESENT	0x0001
313 
314 /* Bits in the segment table entry */
315 #define _SEGMENT_ENTRY_BITS			0xfffffffffffffe3fUL
316 #define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe3cUL
317 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff1073cUL
318 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
319 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
320 #define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
321 #define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
322 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
323 #define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */
324 
325 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PRESENT)
326 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
327 
328 #define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
329 #define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
330 
331 #define _SEGMENT_ENTRY_COMM	0x0010	/* Common-Segment, marks swap entry */
332 #define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
333 #define _SEGMENT_ENTRY_WRITE	0x8000	/* SW segment write bit */
334 #define _SEGMENT_ENTRY_READ	0x4000	/* SW segment read bit */
335 
336 #ifdef CONFIG_MEM_SOFT_DIRTY
337 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */
338 #else
339 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
340 #endif
341 
342 #define _SEGMENT_ENTRY_PRESENT	0x0001	/* SW segment present bit */
343 
344 /* Common bits in region and segment table entries, for swap entries */
345 #define _RST_ENTRY_COMM		0x0010	/* Common-Region/Segment, marks swap entry */
346 #define _RST_ENTRY_INVALID	0x0020	/* invalid region/segment table entry */
347 
348 #define _CRST_ENTRIES	2048	/* number of region/segment table entries */
349 #define _PAGE_ENTRIES	256	/* number of page table entries	*/
350 
351 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
352 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
353 
354 #define _REGION1_SHIFT	53
355 #define _REGION2_SHIFT	42
356 #define _REGION3_SHIFT	31
357 #define _SEGMENT_SHIFT	20
358 
359 #define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
360 #define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
361 #define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
362 #define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
363 #define _PAGE_INDEX	(0xffUL  << PAGE_SHIFT)
364 
365 #define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
366 #define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
367 #define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
368 #define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)
369 
370 #define _REGION1_MASK	(~(_REGION1_SIZE - 1))
371 #define _REGION2_MASK	(~(_REGION2_SIZE - 1))
372 #define _REGION3_MASK	(~(_REGION3_SIZE - 1))
373 #define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
374 
375 #define PMD_SHIFT	_SEGMENT_SHIFT
376 #define PUD_SHIFT	_REGION3_SHIFT
377 #define P4D_SHIFT	_REGION2_SHIFT
378 #define PGDIR_SHIFT	_REGION1_SHIFT
379 
380 #define PMD_SIZE	_SEGMENT_SIZE
381 #define PUD_SIZE	_REGION3_SIZE
382 #define P4D_SIZE	_REGION2_SIZE
383 #define PGDIR_SIZE	_REGION1_SIZE
384 
385 #define PMD_MASK	_SEGMENT_MASK
386 #define PUD_MASK	_REGION3_MASK
387 #define P4D_MASK	_REGION2_MASK
388 #define PGDIR_MASK	_REGION1_MASK
389 
390 #define PTRS_PER_PTE	_PAGE_ENTRIES
391 #define PTRS_PER_PMD	_CRST_ENTRIES
392 #define PTRS_PER_PUD	_CRST_ENTRIES
393 #define PTRS_PER_P4D	_CRST_ENTRIES
394 #define PTRS_PER_PGD	_CRST_ENTRIES
395 
396 /*
397  * Segment table and region3 table entry encoding
398  * (R = read-only, I = invalid, y = young bit):
399  *				dy..R...I...wr
400  * prot-none, clean, old	00..1...1...00
401  * prot-none, clean, young	01..1...1...00
402  * prot-none, dirty, old	10..1...1...00
403  * prot-none, dirty, young	11..1...1...00
404  * read-only, clean, old	00..1...1...01
405  * read-only, clean, young	01..1...0...01
406  * read-only, dirty, old	10..1...1...01
407  * read-only, dirty, young	11..1...0...01
408  * read-write, clean, old	00..1...1...11
409  * read-write, clean, young	01..1...0...11
410  * read-write, dirty, old	10..0...1...11
411  * read-write, dirty, young	11..0...0...11
412  * The segment table origin is used to distinguish empty (origin==0) from
413  * read-write, old segment table entries (origin!=0)
414  * HW-bits: R read-only, I invalid
415  * SW-bits: y young, d dirty, r read, w write
416  */
417 
418 /*
419  * A user page table pointer has the space-switch-event bit, the
420  * private-space-control bit and the storage-alteration-event-control
421  * bit set. A kernel page table pointer doesn't need them.
422  */
423 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
424 				 _ASCE_ALT_EVENT)
425 
426 /*
427  * Page protection definitions.
428  */
429 #define __PAGE_NONE		(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
430 #define __PAGE_RO		(_PAGE_PRESENT | _PAGE_READ | \
431 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
432 #define __PAGE_RX		(_PAGE_PRESENT | _PAGE_READ | \
433 				 _PAGE_INVALID | _PAGE_PROTECT)
434 #define __PAGE_RW		(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
435 				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
436 #define __PAGE_RWX		(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
437 				 _PAGE_INVALID | _PAGE_PROTECT)
438 #define __PAGE_SHARED		(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
439 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
440 #define __PAGE_KERNEL		(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
441 				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
442 #define __PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
443 				 _PAGE_PROTECT | _PAGE_NOEXEC)
444 
445 extern unsigned long page_noexec_mask;
446 
447 #define __pgprot_page_mask(x)	__pgprot((x) & page_noexec_mask)
448 
449 #define PAGE_NONE		__pgprot_page_mask(__PAGE_NONE)
450 #define PAGE_RO			__pgprot_page_mask(__PAGE_RO)
451 #define PAGE_RX			__pgprot_page_mask(__PAGE_RX)
452 #define PAGE_RW			__pgprot_page_mask(__PAGE_RW)
453 #define PAGE_RWX		__pgprot_page_mask(__PAGE_RWX)
454 #define PAGE_SHARED		__pgprot_page_mask(__PAGE_SHARED)
455 #define PAGE_KERNEL		__pgprot_page_mask(__PAGE_KERNEL)
456 #define PAGE_KERNEL_RO		__pgprot_page_mask(__PAGE_KERNEL_RO)
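/*
 * Note on __pgprot_page_mask() (behaviour inferred from the mask usage
 * above): on machines without support for the no-execute bit,
 * page_noexec_mask is expected to clear _PAGE_NOEXEC, in which case e.g.
 * PAGE_RO and PAGE_RX end up with identical bit patterns:
 *
 *	pgprot_val(PAGE_RO) == (__PAGE_RO & page_noexec_mask)
 */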
457 
458 /*
459  * Segment entry (large page) protection definitions.
460  */
461 #define __SEGMENT_NONE		(_SEGMENT_ENTRY_PRESENT | \
462 				 _SEGMENT_ENTRY_INVALID | \
463 				 _SEGMENT_ENTRY_PROTECT)
464 #define __SEGMENT_RO		(_SEGMENT_ENTRY_PRESENT | \
465 				 _SEGMENT_ENTRY_PROTECT | \
466 				 _SEGMENT_ENTRY_READ | \
467 				 _SEGMENT_ENTRY_NOEXEC)
468 #define __SEGMENT_RX		(_SEGMENT_ENTRY_PRESENT | \
469 				 _SEGMENT_ENTRY_PROTECT | \
470 				 _SEGMENT_ENTRY_READ)
471 #define __SEGMENT_RW		(_SEGMENT_ENTRY_PRESENT | \
472 				 _SEGMENT_ENTRY_READ | \
473 				 _SEGMENT_ENTRY_WRITE | \
474 				 _SEGMENT_ENTRY_NOEXEC)
475 #define __SEGMENT_RWX		(_SEGMENT_ENTRY_PRESENT | \
476 				 _SEGMENT_ENTRY_READ | \
477 				 _SEGMENT_ENTRY_WRITE)
478 #define __SEGMENT_KERNEL	(_SEGMENT_ENTRY |	\
479 				 _SEGMENT_ENTRY_LARGE |	\
480 				 _SEGMENT_ENTRY_READ |	\
481 				 _SEGMENT_ENTRY_WRITE | \
482 				 _SEGMENT_ENTRY_YOUNG | \
483 				 _SEGMENT_ENTRY_DIRTY | \
484 				 _SEGMENT_ENTRY_NOEXEC)
485 #define __SEGMENT_KERNEL_RO	(_SEGMENT_ENTRY |	\
486 				 _SEGMENT_ENTRY_LARGE |	\
487 				 _SEGMENT_ENTRY_READ |	\
488 				 _SEGMENT_ENTRY_YOUNG |	\
489 				 _SEGMENT_ENTRY_PROTECT | \
490 				 _SEGMENT_ENTRY_NOEXEC)
491 
492 extern unsigned long segment_noexec_mask;
493 
494 #define __pgprot_segment_mask(x) __pgprot((x) & segment_noexec_mask)
495 
496 #define SEGMENT_NONE		__pgprot_segment_mask(__SEGMENT_NONE)
497 #define SEGMENT_RO		__pgprot_segment_mask(__SEGMENT_RO)
498 #define SEGMENT_RX		__pgprot_segment_mask(__SEGMENT_RX)
499 #define SEGMENT_RW		__pgprot_segment_mask(__SEGMENT_RW)
500 #define SEGMENT_RWX		__pgprot_segment_mask(__SEGMENT_RWX)
501 #define SEGMENT_KERNEL		__pgprot_segment_mask(__SEGMENT_KERNEL)
502 #define SEGMENT_KERNEL_RO	__pgprot_segment_mask(__SEGMENT_KERNEL_RO)
503 
504 /*
505  * Region3 entry (large page) protection definitions.
506  */
507 
508 #define __REGION3_KERNEL	(_REGION_ENTRY_TYPE_R3 | \
509 				 _REGION3_ENTRY_PRESENT | \
510 				 _REGION3_ENTRY_LARGE | \
511 				 _REGION3_ENTRY_READ | \
512 				 _REGION3_ENTRY_WRITE | \
513 				 _REGION3_ENTRY_YOUNG | \
514 				 _REGION3_ENTRY_DIRTY | \
515 				 _REGION_ENTRY_NOEXEC)
516 #define __REGION3_KERNEL_RO	(_REGION_ENTRY_TYPE_R3 | \
517 				 _REGION3_ENTRY_PRESENT | \
518 				 _REGION3_ENTRY_LARGE | \
519 				 _REGION3_ENTRY_READ | \
520 				 _REGION3_ENTRY_YOUNG | \
521 				 _REGION_ENTRY_PROTECT | \
522 				 _REGION_ENTRY_NOEXEC)
523 
524 extern unsigned long region_noexec_mask;
525 
526 #define __pgprot_region_mask(x)	__pgprot((x) & region_noexec_mask)
527 
528 #define REGION3_KERNEL		__pgprot_region_mask(__REGION3_KERNEL)
529 #define REGION3_KERNEL_RO	__pgprot_region_mask(__REGION3_KERNEL_RO)
530 
531 static inline bool mm_p4d_folded(struct mm_struct *mm)
532 {
533 	return mm->context.asce_limit <= _REGION1_SIZE;
534 }
535 #define mm_p4d_folded(mm) mm_p4d_folded(mm)
536 
537 static inline bool mm_pud_folded(struct mm_struct *mm)
538 {
539 	return mm->context.asce_limit <= _REGION2_SIZE;
540 }
541 #define mm_pud_folded(mm) mm_pud_folded(mm)
542 
543 static inline bool mm_pmd_folded(struct mm_struct *mm)
544 {
545 	return mm->context.asce_limit <= _REGION3_SIZE;
546 }
547 #define mm_pmd_folded(mm) mm_pmd_folded(mm)
548 
549 static inline int mm_is_protected(struct mm_struct *mm)
550 {
551 #if IS_ENABLED(CONFIG_KVM)
552 	if (unlikely(atomic_read(&mm->context.protected_count)))
553 		return 1;
554 #endif
555 	return 0;
556 }
557 
558 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
559 {
560 	return __pte(pte_val(pte) & ~pgprot_val(prot));
561 }
562 
563 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
564 {
565 	return __pte(pte_val(pte) | pgprot_val(prot));
566 }
567 
568 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
569 {
570 	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
571 }
572 
573 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
574 {
575 	return __pmd(pmd_val(pmd) | pgprot_val(prot));
576 }
577 
578 static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
579 {
580 	return __pud(pud_val(pud) & ~pgprot_val(prot));
581 }
582 
583 static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
584 {
585 	return __pud(pud_val(pud) | pgprot_val(prot));
586 }
587 
588 /*
589  * As soon as the guest uses storage keys or enables PV, we deduplicate all
590  * mapped shared zeropages and prevent new shared zeropages from getting
591  * mapped.
592  */
593 #define mm_forbids_zeropage mm_forbids_zeropage
594 static inline int mm_forbids_zeropage(struct mm_struct *mm)
595 {
596 #if IS_ENABLED(CONFIG_KVM)
597 	if (!mm->context.allow_cow_sharing)
598 		return 1;
599 #endif
600 	return 0;
601 }
602 
603 /**
604  * cspg() - Compare and Swap and Purge (CSPG)
605  * @ptr: Pointer to the value to be exchanged
606  * @old: The expected old value
607  * @new: The new value
608  *
609  * Return: True if compare and swap was successful, otherwise false.
610  */
611 static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new)
612 {
613 	union register_pair r1 = { .even = old, .odd = new, };
614 	unsigned long address = (unsigned long)ptr | 1;
615 
616 	asm volatile(
617 		"	cspg	%[r1],%[address]"
618 		: [r1] "+&d" (r1.pair), "+m" (*ptr)
619 		: [address] "d" (address)
620 		: "cc");
621 	return old == r1.even;
622 }
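/*
 * Hypothetical usage sketch: cspg() is the building block for atomically
 * invalidating a DAT table entry while purging the TLB, e.g. marking a
 * segment table entry invalid only if it still holds the expected value
 * (compare __pmdp_cspg() further down):
 *
 *	unsigned long old = pmd_val(*pmdp);
 *
 *	if (!cspg((unsigned long *)pmdp, old, old | _SEGMENT_ENTRY_INVALID))
 *		;	// entry changed concurrently, re-read and retry
 */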
623 
624 #define CRDTE_DTT_PAGE		0x00UL
625 #define CRDTE_DTT_SEGMENT	0x10UL
626 #define CRDTE_DTT_REGION3	0x14UL
627 #define CRDTE_DTT_REGION2	0x18UL
628 #define CRDTE_DTT_REGION1	0x1cUL
629 
630 /**
631  * crdte() - Compare and Replace DAT Table Entry
632  * @old:     The expected old value
633  * @new:     The new value
634  * @table:   Pointer to the value to be exchanged
635  * @dtt:     Table type of the table to be exchanged
636  * @address: The address mapped by the entry to be replaced
637  * @asce:    The ASCE of this entry
638  *
639  * Return: True if compare and replace was successful, otherwise false.
640  */
641 static inline bool crdte(unsigned long old, unsigned long new,
642 			 unsigned long *table, unsigned long dtt,
643 			 unsigned long address, unsigned long asce)
644 {
645 	union register_pair r1 = { .even = old, .odd = new, };
646 	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
647 
648 	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
649 		     : [r1] "+&d" (r1.pair)
650 		     : [r2] "d" (r2.pair), [asce] "a" (asce)
651 		     : "memory", "cc");
652 	return old == r1.even;
653 }
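/*
 * Illustrative call (parameters assumed, not taken from a real caller):
 * replace a segment table entry and let the CPU purge the translations
 * that were formed from the old entry:
 *
 *	if (!crdte(pmd_val(old), pmd_val(new), (unsigned long *)pmdp,
 *		   CRDTE_DTT_SEGMENT, addr & HPAGE_MASK, mm->context.asce))
 *		;	// the entry was changed under us
 */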
654 
655 /*
656  * pgd/p4d/pud/pmd/pte query functions
657  */
658 static inline int pgd_folded(pgd_t pgd)
659 {
660 	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
661 }
662 
663 static inline int pgd_present(pgd_t pgd)
664 {
665 	if (pgd_folded(pgd))
666 		return 1;
667 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
668 }
669 
670 static inline int pgd_none(pgd_t pgd)
671 {
672 	if (pgd_folded(pgd))
673 		return 0;
674 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
675 }
676 
677 static inline int pgd_bad(pgd_t pgd)
678 {
679 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
680 		return 0;
681 	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
682 }
683 
684 static inline unsigned long pgd_pfn(pgd_t pgd)
685 {
686 	unsigned long origin_mask;
687 
688 	origin_mask = _REGION_ENTRY_ORIGIN;
689 	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
690 }
691 
692 static inline int p4d_folded(p4d_t p4d)
693 {
694 	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
695 }
696 
697 static inline int p4d_present(p4d_t p4d)
698 {
699 	if (p4d_folded(p4d))
700 		return 1;
701 	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
702 }
703 
704 static inline int p4d_none(p4d_t p4d)
705 {
706 	if (p4d_folded(p4d))
707 		return 0;
708 	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
709 }
710 
711 static inline unsigned long p4d_pfn(p4d_t p4d)
712 {
713 	unsigned long origin_mask;
714 
715 	origin_mask = _REGION_ENTRY_ORIGIN;
716 	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
717 }
718 
719 static inline int pud_folded(pud_t pud)
720 {
721 	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
722 }
723 
724 static inline int pud_present(pud_t pud)
725 {
726 	if (pud_folded(pud))
727 		return 1;
728 	return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0;
729 }
730 
731 static inline int pud_none(pud_t pud)
732 {
733 	if (pud_folded(pud))
734 		return 0;
735 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
736 }
737 
738 #define pud_leaf pud_leaf
739 static inline bool pud_leaf(pud_t pud)
740 {
741 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
742 		return 0;
743 	return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0);
744 }
745 
746 static inline int pmd_present(pmd_t pmd)
747 {
748 	return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0;
749 }
750 
751 #define pmd_leaf pmd_leaf
752 static inline bool pmd_leaf(pmd_t pmd)
753 {
754 	return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0);
755 }
756 
757 static inline int pmd_bad(pmd_t pmd)
758 {
759 	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
760 		return 1;
761 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
762 }
763 
764 static inline int pud_bad(pud_t pud)
765 {
766 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
767 
768 	if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
769 		return 1;
770 	if (type < _REGION_ENTRY_TYPE_R3)
771 		return 0;
772 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
773 }
774 
775 static inline int p4d_bad(p4d_t p4d)
776 {
777 	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
778 
779 	if (type > _REGION_ENTRY_TYPE_R2)
780 		return 1;
781 	if (type < _REGION_ENTRY_TYPE_R2)
782 		return 0;
783 	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
784 }
785 
786 static inline int pmd_none(pmd_t pmd)
787 {
788 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
789 }
790 
791 #define pmd_write pmd_write
792 static inline int pmd_write(pmd_t pmd)
793 {
794 	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
795 }
796 
797 #define pud_write pud_write
798 static inline int pud_write(pud_t pud)
799 {
800 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
801 }
802 
803 #define pmd_dirty pmd_dirty
804 static inline int pmd_dirty(pmd_t pmd)
805 {
806 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
807 }
808 
809 #define pmd_young pmd_young
810 static inline int pmd_young(pmd_t pmd)
811 {
812 	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
813 }
814 
815 static inline int pte_present(pte_t pte)
816 {
817 	/* Bit pattern: (pte & 0x001) == 0x001 */
818 	return (pte_val(pte) & _PAGE_PRESENT) != 0;
819 }
820 
821 static inline int pte_none(pte_t pte)
822 {
823 	/* Bit pattern: pte == 0x400 */
824 	return pte_val(pte) == _PAGE_INVALID;
825 }
826 
827 static inline int pte_swap(pte_t pte)
828 {
829 	/* Bit pattern: (pte & 0x201) == 0x200 */
830 	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
831 		== _PAGE_PROTECT;
832 }
833 
834 static inline int pte_special(pte_t pte)
835 {
836 	return (pte_val(pte) & _PAGE_SPECIAL);
837 }
838 
839 #define __HAVE_ARCH_PTE_SAME
840 static inline int pte_same(pte_t a, pte_t b)
841 {
842 	return pte_val(a) == pte_val(b);
843 }
844 
845 #ifdef CONFIG_NUMA_BALANCING
846 static inline int pte_protnone(pte_t pte)
847 {
848 	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
849 }
850 
851 static inline int pmd_protnone(pmd_t pmd)
852 {
853 	/* pmd_leaf(pmd) implies pmd_present(pmd) */
854 	return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
855 }
856 #endif
857 
858 static inline bool pte_swp_exclusive(pte_t pte)
859 {
860 	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
861 }
862 
863 static inline pte_t pte_swp_mkexclusive(pte_t pte)
864 {
865 	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
866 }
867 
868 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
869 {
870 	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
871 }
872 
873 static inline int pte_soft_dirty(pte_t pte)
874 {
875 	return pte_val(pte) & _PAGE_SOFT_DIRTY;
876 }
877 #define pte_swp_soft_dirty pte_soft_dirty
878 
879 static inline pte_t pte_mksoft_dirty(pte_t pte)
880 {
881 	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
882 }
883 #define pte_swp_mksoft_dirty pte_mksoft_dirty
884 
885 static inline pte_t pte_clear_soft_dirty(pte_t pte)
886 {
887 	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
888 }
889 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
890 
891 static inline int pmd_soft_dirty(pmd_t pmd)
892 {
893 	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
894 }
895 
896 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
897 {
898 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
899 }
900 
901 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
902 {
903 	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
904 }
905 
906 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
907 #define pmd_swp_soft_dirty(pmd)		pmd_soft_dirty(pmd)
908 #define pmd_swp_mksoft_dirty(pmd)	pmd_mksoft_dirty(pmd)
909 #define pmd_swp_clear_soft_dirty(pmd)	pmd_clear_soft_dirty(pmd)
910 #endif
911 
912 /*
913  * The query functions pte_write/pte_dirty/pte_young only work if
914  * pte_present() is true. Undefined behaviour if not.
915  */
916 static inline int pte_write(pte_t pte)
917 {
918 	return (pte_val(pte) & _PAGE_WRITE) != 0;
919 }
920 
921 static inline int pte_dirty(pte_t pte)
922 {
923 	return (pte_val(pte) & _PAGE_DIRTY) != 0;
924 }
925 
926 static inline int pte_young(pte_t pte)
927 {
928 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
929 }
930 
931 #define __HAVE_ARCH_PTE_UNUSED
932 static inline int pte_unused(pte_t pte)
933 {
934 	return pte_val(pte) & _PAGE_UNUSED;
935 }
936 
937 /*
938  * Extract the pgprot value from the given pte while at the same time making it
939  * usable for kernel address space mappings where fault-driven dirty and
940  * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
941  * must not be set.
942  */
943 #define pte_pgprot pte_pgprot
944 static inline pgprot_t pte_pgprot(pte_t pte)
945 {
946 	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
947 
948 	if (pte_write(pte))
949 		pte_flags |= pgprot_val(PAGE_KERNEL);
950 	else
951 		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
952 	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
953 
954 	return __pgprot(pte_flags);
955 }
956 
957 /*
958  * pgd/pmd/pte modification functions
959  */
960 
961 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
962 {
963 	WRITE_ONCE(*pgdp, pgd);
964 }
965 
966 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
967 {
968 	WRITE_ONCE(*p4dp, p4d);
969 }
970 
971 static inline void set_pud(pud_t *pudp, pud_t pud)
972 {
973 	WRITE_ONCE(*pudp, pud);
974 }
975 
976 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
977 {
978 	WRITE_ONCE(*pmdp, pmd);
979 }
980 
981 static inline void set_pte(pte_t *ptep, pte_t pte)
982 {
983 	WRITE_ONCE(*ptep, pte);
984 }
985 
986 static inline void pgd_clear(pgd_t *pgd)
987 {
988 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
989 		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
990 }
991 
992 static inline void p4d_clear(p4d_t *p4d)
993 {
994 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
995 		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
996 }
997 
998 static inline void pud_clear(pud_t *pud)
999 {
1000 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1001 		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
1002 }
1003 
1004 static inline void pmd_clear(pmd_t *pmdp)
1005 {
1006 	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1007 }
1008 
1009 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
1010 {
1011 	set_pte(ptep, __pte(_PAGE_INVALID));
1012 }
1013 
1014 /*
1015  * The following pte modification functions only work if
1016  * pte_present() is true. Undefined behaviour if not.
1017  */
1018 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1019 {
1020 	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
1021 	pte = set_pte_bit(pte, newprot);
1022 	/*
1023 	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
1024 	 * has the invalid bit set, clear it again for readable, young pages
1025 	 */
1026 	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
1027 		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
1028 	/*
1029 	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
1030 	 * protection bit set, clear it again for writable, dirty pages
1031 	 */
1032 	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
1033 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1034 	return pte;
1035 }
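/*
 * Worked example of the rules above: applying pte_modify() with PAGE_RO
 * to a young, dirty, writable pte keeps _PAGE_YOUNG and _PAGE_DIRTY,
 * drops _PAGE_WRITE, and initially sets _PAGE_INVALID and _PAGE_PROTECT
 * from the new protection. _PAGE_INVALID is cleared again because the
 * pte is readable and young, while _PAGE_PROTECT stays set because the
 * new protection has no _PAGE_WRITE, so the next store faults.
 */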
1036 
1037 static inline pte_t pte_wrprotect(pte_t pte)
1038 {
1039 	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
1040 	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1041 }
1042 
1043 static inline pte_t pte_mkwrite_novma(pte_t pte)
1044 {
1045 	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
1046 	if (pte_val(pte) & _PAGE_DIRTY)
1047 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1048 	return pte;
1049 }
1050 
1051 static inline pte_t pte_mkclean(pte_t pte)
1052 {
1053 	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
1054 	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1055 }
1056 
1057 static inline pte_t pte_mkdirty(pte_t pte)
1058 {
1059 	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
1060 	if (pte_val(pte) & _PAGE_WRITE)
1061 		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1062 	return pte;
1063 }
1064 
1065 static inline pte_t pte_mkold(pte_t pte)
1066 {
1067 	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1068 	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
1069 }
1070 
1071 static inline pte_t pte_mkyoung(pte_t pte)
1072 {
1073 	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1074 	if (pte_val(pte) & _PAGE_READ)
1075 		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
1076 	return pte;
1077 }
1078 
1079 static inline pte_t pte_mkspecial(pte_t pte)
1080 {
1081 	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
1082 }
1083 
1084 #ifdef CONFIG_HUGETLB_PAGE
1085 static inline pte_t pte_mkhuge(pte_t pte)
1086 {
1087 	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
1088 }
1089 #endif
1090 
1091 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
1092 {
1093 	asm volatile("sske %[skey],%[addr],1"
1094 		     : [addr] "+a" (addr) : [skey] "d" (skey));
1095 	return addr;
1096 }
1097 
1098 #define IPTE_GLOBAL	0
1099 #define	IPTE_LOCAL	1
1100 
1101 #define IPTE_NODAT	0x400
1102 #define IPTE_GUEST_ASCE	0x800
1103 
1104 static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
1105 {
1106 	unsigned long pto;
1107 
1108 	pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
1109 	asm volatile(".insn	rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
1110 		     : "+m" (*ptep)
1111 		     : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
1112 		       [m4] "i" (local));
1113 }
1114 
1115 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
1116 					unsigned long opt, unsigned long asce,
1117 					int local)
1118 {
1119 	unsigned long pto = __pa(ptep);
1120 
1121 	if (__builtin_constant_p(opt) && opt == 0) {
1122 		/* Invalidation + TLB flush for the pte */
1123 		asm volatile(
1124 			"	ipte	%[r1],%[r2],0,%[m4]"
1125 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
1126 			  [m4] "i" (local));
1127 		return;
1128 	}
1129 
1130 	/* Invalidate ptes with options + TLB flush of the ptes */
1131 	opt = opt | (asce & _ASCE_ORIGIN);
1132 	asm volatile(
1133 		"	ipte	%[r1],%[r2],%[r3],%[m4]"
1134 		: [r2] "+a" (address), [r3] "+a" (opt)
1135 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
1136 }
1137 
1138 static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1139 					      pte_t *ptep, int local)
1140 {
1141 	unsigned long pto = __pa(ptep);
1142 
1143 	/* Invalidate a range of ptes + TLB flush of the ptes */
1144 	do {
1145 		asm volatile(
1146 			"	ipte %[r1],%[r2],%[r3],%[m4]"
1147 			: [r2] "+a" (address), [r3] "+a" (nr)
1148 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
1149 	} while (nr != 255);
1150 }
1151 
1152 /*
1153  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1154  * both clear the TLB for the unmapped pte. The reason is that
1155  * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1156  * to modify an active pte. The sequence is
1157  *   1) ptep_get_and_clear
1158  *   2) set_pte_at
1159  *   3) flush_tlb_range
1160  * On s390 the tlb needs to get flushed with the modification of the pte
1161  * if the pte is active. The only way this can be implemented is to
1162  * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1163  * is a nop.
1164  */
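/*
 * Simplified sketch of that common code sequence (not the actual
 * implementation of change_pte_range()):
 *
 *	oldpte = ptep_get_and_clear(mm, addr, ptep);	// flushes on s390
 *	newpte = pte_modify(oldpte, newprot);
 *	set_pte_at(mm, addr, ptep, newpte);
 *	...
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */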
1165 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1166 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
1167 
1168 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1169 static inline bool ptep_test_and_clear_young(struct vm_area_struct *vma,
1170 		unsigned long addr, pte_t *ptep)
1171 {
1172 	pte_t pte = *ptep;
1173 
1174 	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1175 	return pte_young(pte);
1176 }
1177 
1178 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1179 static inline bool ptep_clear_flush_young(struct vm_area_struct *vma,
1180 		unsigned long address, pte_t *ptep)
1181 {
1182 	return ptep_test_and_clear_young(vma, address, ptep);
1183 }
1184 
1185 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1186 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1187 				       unsigned long addr, pte_t *ptep)
1188 {
1189 	pte_t res;
1190 
1191 	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1192 	/* At this point the reference through the mapping is still present */
1193 	if (mm_is_protected(mm) && pte_present(res))
1194 		WARN_ON_ONCE(uv_convert_from_secure_pte(res));
1195 	page_table_check_pte_clear(mm, addr, res);
1196 	return res;
1197 }
1198 
1199 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1200 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1201 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1202 			     pte_t *, pte_t, pte_t);
1203 
1204 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1205 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1206 				     unsigned long addr, pte_t *ptep)
1207 {
1208 	pte_t res;
1209 
1210 	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1211 	/* At this point the reference through the mapping is still present */
1212 	if (mm_is_protected(vma->vm_mm) && pte_present(res))
1213 		WARN_ON_ONCE(uv_convert_from_secure_pte(res));
1214 	page_table_check_pte_clear(vma->vm_mm, addr, res);
1215 	return res;
1216 }
1217 
1218 /*
1219  * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1220  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1221  * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1222  * cannot be accessed while the batched unmap is running. In this case
1223  * full==1 and a simple pte_clear is enough. See tlb.h.
1224  */
1225 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1226 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1227 					    unsigned long addr,
1228 					    pte_t *ptep, int full)
1229 {
1230 	pte_t res;
1231 
1232 	if (full) {
1233 		res = *ptep;
1234 		set_pte(ptep, __pte(_PAGE_INVALID));
1235 	} else {
1236 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1237 	}
1238 
1239 	page_table_check_pte_clear(mm, addr, res);
1240 
1241 	/* Nothing to do */
1242 	if (!mm_is_protected(mm) || !pte_present(res))
1243 		return res;
1244 	/*
1245 	 * At this point the reference through the mapping is still present.
1246 	 * The notifier should have destroyed all protected vCPUs at this
1247 	 * point, so the destroy should be successful.
1248 	 */
1249 	if (full && !uv_destroy_pte(res))
1250 		return res;
1251 	/*
1252 	 * If something went wrong and the page could not be destroyed, or
1253 	 * if this is not a mm teardown, the slower export is used as
1254 	 * fallback instead. If even that fails, print a warning and leak
1255 	 * the page, to avoid crashing the whole system.
1256 	 */
1257 	WARN_ON_ONCE(uv_convert_from_secure_pte(res));
1258 	return res;
1259 }
1260 
1261 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1262 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1263 				      unsigned long addr, pte_t *ptep)
1264 {
1265 	pte_t pte = *ptep;
1266 
1267 	if (pte_write(pte))
1268 		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1269 }
1270 
1271 /*
1272  * Check if the PTEs only differ in the _PAGE_PROTECT HW bit, while allowing
1273  * the SW PTE bits to differ in the comparison. Those might change e.g.
1274  * because of dirty and young tracking.
1275  */
1276 static inline int pte_allow_rdp(pte_t old, pte_t new)
1277 {
1278 	/*
1279 	 * Only allow changes from RO to RW
1280 	 */
1281 	if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
1282 		return 0;
1283 
1284 	return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
1285 }
1286 
1287 static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
1288 						unsigned long address,
1289 						pte_t *ptep)
1290 {
1291 	/*
1292 	 * RDP might not have propagated the PTE protection reset to all CPUs,
1293 	 * so there could be spurious TLB protection faults.
1294 	 * NOTE: This will also be called when a racing pagetable update on
1295 	 * another thread already installed the correct PTE. Both cases cannot
1296 	 * really be distinguished.
1297 	 * Therefore, only do the local TLB flush when RDP can be used, and the
1298 	 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
1299 	 * A local RDP can be used to do the flush.
1300 	 */
1301 	if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
1302 		__ptep_rdp(address, ptep, 1);
1303 }
1304 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
1305 
1306 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
1307 			 pte_t new);
1308 
1309 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1310 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1311 					unsigned long addr, pte_t *ptep,
1312 					pte_t entry, int dirty)
1313 {
1314 	if (pte_same(*ptep, entry))
1315 		return 0;
1316 	if (cpu_has_rdp() && pte_allow_rdp(*ptep, entry))
1317 		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
1318 	else
1319 		ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1320 	return 1;
1321 }
1322 
1323 #define pgprot_writecombine	pgprot_writecombine
1324 pgprot_t pgprot_writecombine(pgprot_t prot);
1325 
1326 #define PFN_PTE_SHIFT		PAGE_SHIFT
1327 
1328 /*
1329  * Set multiple PTEs to consecutive pages with a single call.  All PTEs
1330  * are within the same folio, PMD and VMA.
1331  */
1332 static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
1333 			      pte_t *ptep, pte_t entry, unsigned int nr)
1334 {
1335 	if (pte_present(entry))
1336 		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
1337 	page_table_check_ptes_set(mm, addr, ptep, entry, nr);
1338 	for (;;) {
1339 		set_pte(ptep, entry);
1340 		if (--nr == 0)
1341 			break;
1342 		ptep++;
1343 		entry = __pte(pte_val(entry) + PAGE_SIZE);
1344 	}
1345 }
1346 #define set_ptes set_ptes
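/*
 * Usage sketch (hypothetical caller): map four consecutive pages of a
 * folio with one call; the physical address encoded in 'entry' is
 * advanced by PAGE_SIZE for every subsequent pte, as in the loop above:
 *
 *	pte_t entry = mk_pte_phys(page_to_phys(page), PAGE_RW);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, entry, 4);
 */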
1347 
1348 /*
1349  * Conversion functions: convert a page and protection to a page entry,
1350  * and a page entry and page directory to the page they refer to.
1351  */
1352 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1353 {
1354 	pte_t __pte;
1355 
1356 	__pte = __pte(physpage | pgprot_val(pgprot));
1357 	return pte_mkyoung(__pte);
1358 }
1359 
1360 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1361 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1362 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1363 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1364 
1365 #define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
1366 #define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
1367 
1368 static inline unsigned long pmd_deref(pmd_t pmd)
1369 {
1370 	unsigned long origin_mask;
1371 
1372 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
1373 	if (pmd_leaf(pmd))
1374 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1375 	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
1376 }
1377 
1378 static inline unsigned long pmd_pfn(pmd_t pmd)
1379 {
1380 	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
1381 }
1382 
1383 static inline unsigned long pud_deref(pud_t pud)
1384 {
1385 	unsigned long origin_mask;
1386 
1387 	origin_mask = _REGION_ENTRY_ORIGIN;
1388 	if (pud_leaf(pud))
1389 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1390 	return (unsigned long)__va(pud_val(pud) & origin_mask);
1391 }
1392 
1393 #define pud_pfn pud_pfn
1394 static inline unsigned long pud_pfn(pud_t pud)
1395 {
1396 	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
1397 }
1398 
1399 /*
1400  * The pgd_offset function *always* adds the index for the top-level
1401  * region/segment table. This is done to get a sequence like the
1402  * following to work:
1403  *	pgdp = pgd_offset(current->mm, addr);
1404  *	pgd = READ_ONCE(*pgdp);
1405  *	p4dp = p4d_offset(&pgd, addr);
1406  *	...
1407  * The subsequent p4d_offset, pud_offset and pmd_offset functions
1408  * only add an index if they dereferenced the pointer.
1409  */
1410 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1411 {
1412 	unsigned long rste;
1413 	unsigned int shift;
1414 
1415 	/* Get the first entry of the top level table */
1416 	rste = pgd_val(*pgd);
1417 	/* Pick up the shift from the table type of the first entry */
1418 	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1419 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1420 }
1421 
1422 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1423 
1424 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1425 {
1426 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1427 		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1428 	return (p4d_t *) pgdp;
1429 }
1430 #define p4d_offset_lockless p4d_offset_lockless
1431 
1432 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1433 {
1434 	return p4d_offset_lockless(pgdp, *pgdp, address);
1435 }
1436 
1437 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1438 {
1439 	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1440 		return (pud_t *) p4d_deref(p4d) + pud_index(address);
1441 	return (pud_t *) p4dp;
1442 }
1443 #define pud_offset_lockless pud_offset_lockless
1444 
1445 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1446 {
1447 	return pud_offset_lockless(p4dp, *p4dp, address);
1448 }
1449 #define pud_offset pud_offset
1450 
1451 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1452 {
1453 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1454 		return (pmd_t *) pud_deref(pud) + pmd_index(address);
1455 	return (pmd_t *) pudp;
1456 }
1457 #define pmd_offset_lockless pmd_offset_lockless
1458 
1459 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1460 {
1461 	return pmd_offset_lockless(pudp, *pudp, address);
1462 }
1463 #define pmd_offset pmd_offset
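/*
 * Putting the offset functions together, a lockless walk as hinted at in
 * the pgd_offset() comment above would look like this (sketch, presence
 * checks omitted):
 *
 *	pgd_t *pgdp = pgd_offset(mm, addr);
 *	pgd_t pgd = READ_ONCE(*pgdp);
 *	p4d_t *p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	p4d_t p4d = READ_ONCE(*p4dp);
 *	pud_t *pudp = pud_offset_lockless(p4dp, p4d, addr);
 *	pud_t pud = READ_ONCE(*pudp);
 *	pmd_t *pmdp = pmd_offset_lockless(pudp, pud, addr);
 *
 * Each level only advances the pointer if the previous level was really
 * dereferenced; for a folded level the same table entry is passed down.
 */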
1464 
1465 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1466 {
1467 	return (unsigned long) pmd_deref(pmd);
1468 }
1469 
1470 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1471 {
1472 	return end <= current->mm->context.asce_limit;
1473 }
1474 #define gup_fast_permitted gup_fast_permitted
1475 
1476 #define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1477 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1478 #define pte_page(x) pfn_to_page(pte_pfn(x))
1479 
1480 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1481 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1482 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1483 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1484 
1485 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1486 {
1487 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1488 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1489 }
1490 
1491 static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
1492 {
1493 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1494 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1495 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1496 	return pmd;
1497 }
1498 
1499 static inline pmd_t pmd_mkclean(pmd_t pmd)
1500 {
1501 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1502 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1503 }
1504 
1505 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1506 {
1507 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1508 	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1509 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1510 	return pmd;
1511 }
1512 
1513 static inline pud_t pud_wrprotect(pud_t pud)
1514 {
1515 	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1516 	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1517 }
1518 
1519 static inline pud_t pud_mkwrite(pud_t pud)
1520 {
1521 	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1522 	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1523 		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1524 	return pud;
1525 }
1526 
1527 static inline pud_t pud_mkclean(pud_t pud)
1528 {
1529 	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1530 	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1531 }
1532 
1533 static inline pud_t pud_mkdirty(pud_t pud)
1534 {
1535 	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1536 	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1537 		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1538 	return pud;
1539 }
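
/*
 * Note on the wrprotect()/mkwrite()/mkclean()/mkdirty() helpers above (and
 * pmd_mkyoung()/pmd_mkold() further down): dirty and young state of these
 * large mappings is tracked in software. A mapping that is writable but
 * still clean keeps the hardware PROTECT bit set, so the first store faults
 * and lets the fault handler mark it dirty; a mapping that is old keeps the
 * INVALID bit set, so the first access faults and lets the handler mark it
 * young. The helpers keep those pairs consistent:
 *
 *	WRITE set, DIRTY set	-> PROTECT cleared (stores go straight through)
 *	WRITE set, DIRTY clear	-> PROTECT kept set (next store faults)
 *	READ set, YOUNG set	-> INVALID cleared
 *	YOUNG clear		-> INVALID kept set (next access faults)
 */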
1540 
1541 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1542 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1543 {
1544 	/*
1545 	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1546 	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1547 	 */
1548 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1549 		return pgprot_val(SEGMENT_NONE);
1550 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1551 		return pgprot_val(SEGMENT_RO);
1552 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1553 		return pgprot_val(SEGMENT_RX);
1554 	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1555 		return pgprot_val(SEGMENT_RW);
1556 	return pgprot_val(SEGMENT_RWX);
1557 }
1558 
1559 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1560 {
1561 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1562 	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1563 		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1564 	return pmd;
1565 }
1566 
1567 static inline pmd_t pmd_mkold(pmd_t pmd)
1568 {
1569 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1570 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1571 }
1572 
1573 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1574 {
1575 	unsigned long mask;
1576 
1577 	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
1578 	mask |= _SEGMENT_ENTRY_DIRTY;
1579 	mask |= _SEGMENT_ENTRY_YOUNG;
1580 	mask |= _SEGMENT_ENTRY_LARGE;
1581 	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
1582 	pmd = __pmd(pmd_val(pmd) & mask);
1583 	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
1584 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1585 		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1586 	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1587 		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1588 	return pmd;
1589 }
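
/*
 * pmd_modify() mirrors pte_modify(): only the origin, LARGE, DIRTY, YOUNG
 * and SOFT_DIRTY bits of the old entry survive, the protection bits are
 * taken from newprot via massage_pgprot_pmd(), and PROTECT/INVALID are then
 * re-derived from the preserved dirty/young state as described above.
 */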
1590 
1591 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1592 {
1593 	return __pmd(physpage + massage_pgprot_pmd(pgprot));
1594 }
1595 
1596 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1597 
1598 static inline void __pmdp_cspg(pmd_t *pmdp)
1599 {
1600 	cspg((unsigned long *)pmdp, pmd_val(*pmdp),
1601 	     pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1602 }
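
/*
 * __pmdp_cspg() invalidates a segment table entry with compare and swap
 * and purge: the entry is replaced atomically and the TLB entries formed
 * from it are purged as part of the same operation, without needing a
 * separate flush.
 */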
1603 
1604 #define IDTE_GLOBAL	0
1605 #define IDTE_LOCAL	1
1606 
1607 #define IDTE_PTOA	0x0800
1608 #define IDTE_NODAT	0x1000
1609 #define IDTE_GUEST_ASCE	0x2000
1610 
1611 static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1612 					unsigned long opt, unsigned long asce,
1613 					int local)
1614 {
1615 	unsigned long sto;
1616 
1617 	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
1618 	if (__builtin_constant_p(opt) && opt == 0) {
1619 		/* flush without guest asce */
1620 		asm volatile(
1621 			"	idte	%[r1],0,%[r2],%[m4]"
1622 			: "+m" (*pmdp)
1623 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1624 			  [m4] "i" (local)
1625 			: "cc" );
1626 	} else {
1627 		/* flush with guest asce */
1628 		asm volatile(
1629 			"	idte	%[r1],%[r3],%[r2],%[m4]"
1630 			: "+m" (*pmdp)
1631 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1632 			  [r3] "a" (asce), [m4] "i" (local)
1633 			: "cc" );
1634 	}
1635 }
1636 
1637 static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1638 					unsigned long opt, unsigned long asce,
1639 					int local)
1640 {
1641 	unsigned long r3o;
1642 
1643 	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
1644 	r3o |= _ASCE_TYPE_REGION3;
1645 	if (__builtin_constant_p(opt) && opt == 0) {
1646 		/* flush without guest asce */
1647 		asm volatile(
1648 			"	idte	%[r1],0,%[r2],%[m4]"
1649 			: "+m" (*pudp)
1650 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1651 			  [m4] "i" (local)
1652 			: "cc");
1653 	} else {
1654 		/* flush with guest asce */
1655 		asm volatile(
1656 			"	idte	%[r1],%[r3],%[r2],%[m4]"
1657 			: "+m" (*pudp)
1658 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1659 			  [r3] "a" (asce), [m4] "i" (local)
1660 			: "cc" );
1661 	}
1662 }
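
/*
 * Both __pmdp_idte() and __pudp_idte() drive the IDTE instruction: r1 holds
 * the origin of the table containing the entry (recomputed from the entry
 * address and the index of addr), r2 the invalidation address plus optional
 * control bits, and r3 an ASCE when a guest address space has to be flushed
 * as well. The m4 "local" field limits the TLB clearing to the issuing CPU;
 * IDTE_NODAT and IDTE_GUEST_ASCE correspond to the option bits defined
 * above.
 */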
1663 
1664 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1665 pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1666 pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
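
/*
 * pmdp_xchg_direct()/pudp_xchg_direct() exchange an entry and flush the TLB
 * right away; pmdp_xchg_lazy() may defer the flush when the address space
 * is not attached on other CPUs (see arch/s390/mm/pgtable.c). Callers that
 * must not observe stale translations, such as the *_clear_flush and
 * invalidate paths below, use the direct variants.
 */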
1667 
1668 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1669 
1670 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1671 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1672 				pgtable_t pgtable);
1673 
1674 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1675 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1676 
1677 #define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1678 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1679 					unsigned long addr, pmd_t *pmdp,
1680 					pmd_t entry, int dirty)
1681 {
1682 	VM_BUG_ON(addr & ~HPAGE_MASK);
1683 
1684 	entry = pmd_mkyoung(entry);
1685 	if (dirty)
1686 		entry = pmd_mkdirty(entry);
1687 	if (pmd_val(*pmdp) == pmd_val(entry))
1688 		return 0;
1689 	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1690 	return 1;
1691 }
1692 
1693 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1694 static inline bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
1695 		unsigned long addr, pmd_t *pmdp)
1696 {
1697 	pmd_t pmd = *pmdp;
1698 
1699 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1700 	return pmd_young(pmd);
1701 }
1702 
1703 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1704 static inline bool pmdp_clear_flush_young(struct vm_area_struct *vma,
1705 		unsigned long addr, pmd_t *pmdp)
1706 {
1707 	VM_BUG_ON(addr & ~HPAGE_MASK);
1708 	return pmdp_test_and_clear_young(vma, addr, pmdp);
1709 }
1710 
1711 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1712 			      pmd_t *pmdp, pmd_t entry)
1713 {
1714 	page_table_check_pmd_set(mm, addr, pmdp, entry);
1715 	set_pmd(pmdp, entry);
1716 }
1717 
1718 static inline pmd_t pmd_mkhuge(pmd_t pmd)
1719 {
1720 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
1721 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1722 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1723 }
1724 
1725 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1726 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1727 					    unsigned long addr, pmd_t *pmdp)
1728 {
1729 	pmd_t pmd;
1730 
1731 	pmd = pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1732 	page_table_check_pmd_clear(mm, addr, pmd);
1733 	return pmd;
1734 }
1735 
1736 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1737 static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
1738 						 unsigned long addr,
1739 						 pmd_t *pmdp, int full)
1740 {
1741 	pmd_t pmd;
1742 
1743 	if (full) {
1744 		pmd = *pmdp;
1745 		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1746 		page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
1747 		return pmd;
1748 	}
1749 	pmd = pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1750 	page_table_check_pmd_clear(vma->vm_mm, addr, pmd);
1751 	return pmd;
1752 }
1753 
1754 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1755 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1756 					  unsigned long addr, pmd_t *pmdp)
1757 {
1758 	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
1759 }
1760 
1761 #define __HAVE_ARCH_PMDP_INVALIDATE
1762 static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
1763 				   unsigned long addr, pmd_t *pmdp)
1764 {
1765 	pmd_t pmd = *pmdp;
1766 
1767 	VM_WARN_ON_ONCE(!pmd_present(pmd));
1768 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1769 #ifdef CONFIG_PAGE_TABLE_CHECK
1770 	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_READ));
1771 #endif
1772 	page_table_check_pmd_set(vma->vm_mm, addr, pmdp, pmd);
1773 	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
1774 	return pmd;
1775 }
1776 
1777 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1778 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1779 				      unsigned long addr, pmd_t *pmdp)
1780 {
1781 	pmd_t pmd = *pmdp;
1782 
1783 	if (pmd_write(pmd))
1784 		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
1785 }
1786 
1787 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1788 					unsigned long address,
1789 					pmd_t *pmdp)
1790 {
1791 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1792 }
1793 #define pmdp_collapse_flush pmdp_collapse_flush
1794 
1795 #define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
1796 
1797 static inline int pmd_trans_huge(pmd_t pmd)
1798 {
1799 	return pmd_leaf(pmd);
1800 }
1801 
1802 #define has_transparent_hugepage has_transparent_hugepage
1803 static inline int has_transparent_hugepage(void)
1804 {
1805 	return cpu_has_edat1() ? 1 : 0;
1806 }
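
/*
 * THP support depends on EDAT-1, the facility that provides 1 MB large
 * pages at the segment table level; without it pmd-level leaf mappings
 * cannot be created and has_transparent_hugepage() reports 0.
 */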
1807 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1808 
1809 #ifdef CONFIG_PAGE_TABLE_CHECK
1810 static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
1811 {
1812 	VM_BUG_ON(mm == &init_mm);
1813 
1814 	return pte_present(pte);
1815 }
1816 
1817 static inline bool pmd_user_accessible_page(struct mm_struct *mm, unsigned long addr, pmd_t pmd)
1818 {
1819 	VM_BUG_ON(mm == &init_mm);
1820 
1821 	return pmd_leaf(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_READ);
1822 }
1823 
1824 static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
1825 {
1826 	VM_BUG_ON(mm == &init_mm);
1827 
1828 	return pud_leaf(pud);
1829 }
1830 #endif
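
/*
 * The *_user_accessible_page() helpers above feed CONFIG_PAGE_TABLE_CHECK:
 * they tell the generic checker which entries map user-accessible memory
 * and therefore have to be accounted. Kernel page tables (init_mm) are
 * never passed in, hence the VM_BUG_ON.
 */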
1831 
1832 /*
1833  * 64 bit swap entry format:
1834  * A page-table entry has some bits we have to treat in a special way.
1835  * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
1836  * as invalid.
1837  * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1838  * |			  offset			|E11XX|type |S0|
1839  * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1840  * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1841  *
1842  * Bits 0-51 store the offset.
1843  * Bit 52 (E) is used to remember PG_anon_exclusive.
1844  * Bits 57-61 store the type.
1845  * Bit 62 (S) is used for softdirty tracking.
1846  * Bits 55 and 56 (X) are unused.
1847  */
1848 
1849 #define __SWP_OFFSET_MASK	((1UL << 52) - 1)
1850 #define __SWP_OFFSET_SHIFT	12
1851 #define __SWP_TYPE_MASK		((1UL << 5) - 1)
1852 #define __SWP_TYPE_SHIFT	2
1853 
1854 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1855 {
1856 	unsigned long pteval;
1857 
1858 	pteval = _PAGE_INVALID | _PAGE_PROTECT;
1859 	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1860 	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1861 	return __pte(pteval);
1862 }
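
/*
 * Worked example (illustrative values only): mk_swap_pte(5, 0x1234) builds
 *
 *	_PAGE_INVALID | _PAGE_PROTECT		(swap pattern, pte not present)
 *	| (0x1234 << __SWP_OFFSET_SHIFT)	(offset into bits 0-51)
 *	| (5 << __SWP_TYPE_SHIFT)		(type into bits 57-61)
 *
 * and __swp_type()/__swp_offset() below recover 5 and 0x1234 from the
 * resulting entry.
 */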
1863 
1864 static inline unsigned long __swp_type(swp_entry_t entry)
1865 {
1866 	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1867 }
1868 
1869 static inline unsigned long __swp_offset(swp_entry_t entry)
1870 {
1871 	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1872 }
1873 
1874 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1875 {
1876 	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1877 }
1878 
1879 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
1880 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
1881 
1882 /*
1883  * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE)
1884  * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste
1885  * as invalid.
1886  * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010
1887  * |			  offset			|Xtype |11TT|S0|
1888  * |0000000000111111111122222222223333333333444444444455|555555|5566|66|
1889  * |0123456789012345678901234567890123456789012345678901|234567|8901|23|
1890  *
1891  * Bits 0-51 store the offset.
1892  * Bits 53-57 store the type.
1893  * Bit 62 (S) is used for softdirty tracking.
1894  * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT.
1895  * Bit 52 (X) is unused.
1896  */
1897 
1898 #define __SWP_OFFSET_MASK_RSTE	((1UL << 52) - 1)
1899 #define __SWP_OFFSET_SHIFT_RSTE	12
1900 #define __SWP_TYPE_MASK_RSTE	((1UL << 5) - 1)
1901 #define __SWP_TYPE_SHIFT_RSTE	6
1902 
1903 /*
1904  * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3
1905  * bits 0x01. See also __set_huge_pte_at().
1906  */
1907 static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset)
1908 {
1909 	unsigned long rste;
1910 
1911 	rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM;
1912 	rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
1913 	rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
1914 	return rste;
1915 }
1916 
1917 static inline unsigned long __swp_type_rste(swp_entry_t entry)
1918 {
1919 	return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE;
1920 }
1921 
1922 static inline unsigned long __swp_offset_rste(swp_entry_t entry)
1923 {
1924 	return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE;
1925 }
1926 
1927 #define __rste_to_swp_entry(rste)	((swp_entry_t) { rste })
1928 
1929 /*
1930  * s390 has different layout for PTE and region / segment table entries (RSTE).
1931  * This is also true for swap entries, and their swap type and offset encoding.
1932  * For hugetlbfs PTE_MARKER support, s390 has internal __swp_type_rste() and
1933  * __swp_offset_rste() helpers to correctly handle RSTE swap entries.
1934  *
1935  * But common swap code does not know about this difference, and only uses
1936  * __swp_type(), __swp_offset() and __swp_entry() helpers for conversion between
1937  * arch-dependent and arch-independent representation of swp_entry_t for all
1938  * pagetable levels. On s390, those helpers only work for PTE swap entries.
1939  *
1940  * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry
1941  * and return the arch-dependent representation of that. Correspondingly,
1942  * implement __swp_entry_to_pmd() to convert that into a proper PMD swap
1943  * entry again. With this, the arch-dependent swp_entry_t representation will
1944  * always look like a PTE swap entry in common code.
1945  *
1946  * This is somewhat similar to fake PTEs in hugetlbfs code for s390, but only
1947  * requires conversion of the swap type and offset, and not all the possible
1948  * PTE bits.
1949  */
1950 static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd)
1951 {
1952 	swp_entry_t arch_entry;
1953 	pte_t pte;
1954 
1955 	arch_entry = __rste_to_swp_entry(pmd_val(pmd));
1956 	pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
1957 	return __pte_to_swp_entry(pte);
1958 }
1959 
1960 static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry)
1961 {
1962 	pmd_t pmd;
1963 
1964 	pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry)));
1965 	return pmd;
1966 }
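
/*
 * Round-trip sketch (illustrative only): for a swapped PMD entry,
 *
 *	swp_entry_t entry = __pmd_to_swp_entry(pmd);	// PTE-style encoding
 *	unsigned long type = __swp_type(entry);		// generic helpers work
 *	unsigned long offset = __swp_offset(entry);
 *	pmd_t new_pmd = __swp_entry_to_pmd(entry);	// back to RSTE encoding
 *
 * type and offset survive the conversion unchanged, while common code never
 * has to know about the RSTE bit layout.
 */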
1967 
1968 extern int vmem_add_mapping(unsigned long start, unsigned long size);
1969 extern void vmem_remove_mapping(unsigned long start, unsigned long size);
1970 extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
1971 extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
1972 extern void vmem_unmap_4k_page(unsigned long addr);
1973 extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
1974 
1975 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1976 #define HAVE_ARCH_UNMAPPED_AREA
1977 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1978 
1979 #define pmd_pgtable(pmd) \
1980 	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
1981 
1982 #endif /* _ASM_S390_PGTABLE_H */
1983