/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Region-Invalid Bit:    Region is not available for address-translation
 * TT Type 01, 10 or 11 (region third, second or first table)
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin (ASCE) of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch-Event control
 * P Private-Space control
 * S Storage-Alteration-Event control
 * R Real-Space control
 * DT Designation-Type control
 * TL Table Length
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
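
/*
 * Worked example, derived from the table above: a present, read-write,
 * dirty, young pte carries _PAGE_PRESENT | _PAGE_YOUNG | _PAGE_DIRTY |
 * _PAGE_READ | _PAGE_WRITE = 0x03d in its low bits, with neither
 * _PAGE_INVALID nor _PAGE_PROTECT set (pattern .00.xx1111.1). Clearing
 * the young bit also sets _PAGE_INVALID (pattern .10.xx1110.1), so the
 * next access faults and the fault handler can mark the pte young again;
 * this is how referenced tracking works without a hardware young bit.
 */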

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
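
/*
 * For example, SEGMENT_KERNEL below composes to the "read-write, dirty,
 * young" line of this table: _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
 * _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_NOEXEC | _SEGMENT_ENTRY_WRITE |
 * _SEGMENT_ENTRY_READ = 0x3503, with both the hardware protect and
 * invalid bits clear, so kernel stores neither protection-fault nor
 * re-fault for young/dirty tracking.
 */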

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
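
/*
 * Example (typical case, not compiled): a task that runs with a
 * three-level page table, i.e. asce_limit == _REGION2_SIZE (4TB, region
 * third table on top), has p4d and pud folded but not pmd; upgrading
 * asce_limit unfolds them one region table level at a time.
 */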

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = table | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the TLB needs to be flushed together with the modification of
 * the pte if the pte is active. The only way this can be implemented is
 * to have ptep_get_and_clear do the TLB flush. In exchange flush_tlb_range
 * is a nop.
 */
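
/*
 * A minimal sketch (not compiled) of that common-code pattern, using
 * only the generic mm helpers named above:
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);   clears pte + flushes TLB
 *	new = pte_modify(old, newprot);             recompute the pte
 *	set_pte_at(mm, addr, ptep, new);            re-establish the mapping
 *	flush_tlb_range(vma, start, end);           nop on s390
 */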
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
				unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage | pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(p4d) ((unsigned long)__va(p4d_val(p4d) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
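	/*
	 * The type bits encode the level, e.g. region-1 (TT == 0x0c)
	 * gives (0x0c >> 2) * 11 + 20 = 53 = _REGION1_SHIFT, region-2
	 * gives 42, region-3 gives 31, and segment (TT == 0x00) gives
	 * 20 = _SEGMENT_SHIFT.
	 */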
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */
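
/*
 * Worked example with arbitrary values: __swp_entry(5, 0x1234) builds
 * the pte value _PAGE_INVALID | _PAGE_PROTECT | 0x1234 << 12 | 5 << 2
 * = 0x01234614. Then (0x614 & 0x201) == 0x200, so pte_swap() is true,
 * and __swp_type()/__swp_offset() recover 5 and 0x1234 again.
 */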

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_S390_PGTABLE_H */