/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
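
/*
 * Note (illustrative): with __HAVE_COLOR_ZERO_PAGE there is not one zero
 * page but a block of them, and zero_page_mask picks one based on bits of
 * the faulting address. E.g. with four zero pages zero_page_mask would be
 * 0x3000, so a read fault at vaddr 0x...5000 is backed by the page at
 * empty_zero_page + 0x1000, spreading zero-page reads across cache colors.
 */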

/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	       \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 the segment-table entries are combined into one pgd,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
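
/*
 * Worked example (64 bit): PMD_SHIFT=20, PUD_SHIFT=31 and PGDIR_SHIFT=42
 * give PMD_SIZE=1MB (one segment), PUD_SIZE=2GB and PGDIR_SIZE=4TB. A
 * virtual address is thus decomposed as
 *   bits 42-52 -> pgd index (PTRS_PER_PGD = 2048 entries)
 *   bits 31-41 -> pud index (PTRS_PER_PUD = 2048 entries)
 *   bits 20-30 -> pmd index (PTRS_PER_PMD = 2048 entries)
 *   bits 12-19 -> pte index (PTRS_PER_PTE = 256 entries)
 */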

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and
 * modules. On 64 bit kernels we have a 2GB area at the top of the vmalloc
 * area where modules will reside. That makes sure that inter module
 * branches always happen without trampolines and, in addition, the
 * placement within a 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
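
/*
 * Example (illustrative): a storage key of 0x16 = b'00010110' decodes as
 * ACC=1, F=0, R=1, C=1, i.e. access-control key 1 with the page both
 * referenced and changed. The pgste code below moves exactly these R and
 * C bits between the storage key, the pgste and the software pte bits.
 */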

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all
 * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
 * afterwards. This change is done while holding the lock, but the
 * intermediate step of a previously valid pte with the hw invalid bit set
 * can be observed by handle_pte_fault. That makes it necessary that all
 * valid pte types with the hw invalid bit set must be distinguishable from
 * the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
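
/*
 * Illustrative decoding of the type values above: the i/r/x/t columns are
 * _PAGE_INVALID (0x400), _PAGE_RO (0x200), _PAGE_SWX (0x002) and
 * _PAGE_SWT (0x001). E.g. _PAGE_TYPE_SWAP = 0x403 = 0x400 | 0x002 | 0x001,
 * i.e. irxt = 1011, and _PAGE_TYPE_FILE = 0x601 sets 0x400 | 0x200 | 0x001
 * with bit 0x002 free to carry part of the file offset (irxt = 11?1).
 */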

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
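
/*
 * Example: a private writable mapping (__P010/__P110) deliberately maps
 * to PAGE_RO, so the first store faults and the generic code can do
 * copy-on-write before making the pte writable. A shared writable
 * mapping (__S010 etc.) maps to PAGE_RW, which sets the software write
 * bit but keeps the hw read-only bit until the first write marks the
 * page dirty (see pte_mkdirty below).
 */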

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
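
/*
 * Minimal usage sketch for the gmap API (illustrative; the real callers
 * live in the s390 KVM code):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	gmap_map_segment(gmap, from, to, length);
 *	gmap_enable(gmap);
 *	...				run the guest, handle gmap_fault()s
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 *
 * gmap_map_segment backs the guest range at "to" with the host range at
 * "from"; the values are expected to be 1MB segment aligned.
 */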

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * No need to flush the TLB: on s390 the reference bits are in
	 * the storage key and never in the TLB. With virtualization we
	 * handle the reference bit, without it we can simply return.
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	if (mm_has_pgste(mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

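/*
 * Invalidate a segment table entry and flush the TLB for the mapped
 * region. The .insn below encodes the idte instruction (opcode 0xb98e),
 * which needs the segment table origin; it is recovered from the pmd
 * pointer by subtracting the table index again.
 */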
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"	/* idte */
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
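/*
 * A large pmd maps a 1MB segment, i.e. 256 4K pages, each with its own
 * storage key. To test and clear "young" the referenced bits of all 256
 * keys have to be reset: with the rrbm facility one instruction handles
 * 64 pages at a time (4 iterations), otherwise rrbe is executed once per
 * page (256 iterations) and the condition code tells whether the
 * reference bit was set.
 */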
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21 and 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53 and 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
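
/*
 * Worked example (illustrative): __swp_entry(3, 5) yields
 * 0x403 | (3 << 2) | ((5 & 1) << 7) | ((5 & ~1UL) << 11) = 0x248f.
 * Decoding: __swp_type = (0x248f >> 2) & 0x1f = 3 and
 * __swp_offset = (0x248f >> 11) | ((0x248f >> 7) & 1) = 4 | 1 = 5.
 * The low-order offset bit travels separately (the "o" column in the
 * diagram above); the remaining offset bits sit above the type field.
 */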

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
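
/*
 * Worked example (illustrative): pgoff_to_pte(129) stores the low 7
 * offset bits at the low end of the pte and the rest from bit 12 up:
 * (((129 & 0x7f) << 1) + ((129 >> 7) << 12)) | _PAGE_TYPE_FILE
 * = (0x2 + 0x1000) | 0x601 = 0x1603. pte_to_pgoff recovers
 * ((0x1603 >> 12) << 7) + ((0x1603 >> 1) & 0x7f) = 128 + 1 = 129.
 */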

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */