xref: /linux/arch/s390/include/asm/page.h (revision 827634added7f38b7d724cab1dccdb2b004c13c3)
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT      12
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)
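/*
 * With PAGE_SHIFT == 12 the base page size is 4 KB and PAGE_MASK
 * evaluates to ~0xfffUL. The storage key's access-control bits live in
 * the high-order nibble of the key byte (cf. _PAGE_ACC_BITS below),
 * which is why PAGE_DEFAULT_ACC is shifted left by four to form
 * PAGE_DEFAULT_KEY.
 */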

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
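/*
 * Huge pages on s390 are 1 MB (1 << 20): one huge page covers
 * 2^(HPAGE_SHIFT - PAGE_SHIFT) = 256 base pages, i.e. an allocation
 * order of 8.
 */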

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#include <asm/setup.h>
#ifndef __ASSEMBLY__

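/*
 * Initialize the storage keys of a page range. With the default
 * configuration PAGE_DEFAULT_KEY is 0, so the #if drops the call and
 * this function compiles away entirely.
 */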
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
#if PAGE_DEFAULT_KEY
	__storage_key_init_range(start, end);
#endif
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
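/*
 * MVCL works on two even-odd register pairs: registers 2/3 carry the
 * destination address and length, registers 4/5 the source address and
 * length, with the padding byte in bits 0-7 of register 5. Since both
 * lengths are 0x1000 no padding ever happens; the 0xb0 value only
 * requests the cache-bypassing behaviour described above.
 */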
static inline void copy_page(void *to, void *from)
{
	register void *reg2 asm ("2") = to;
	register unsigned long reg3 asm ("3") = 0x1000;
	register void *reg4 asm ("4") = from;
	register unsigned long reg5 asm ("5") = 0xb0001000;
	asm volatile(
		"	mvcl	2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

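/*
 * Hand out a page that is already zeroed for an anonymous user fault.
 * Defining __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE tells the core VM
 * that no separate clear_user_highpage() pass is required.
 */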
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These wrapper structs exist purely for C type-checking, so that
 * values belonging to different page-table levels cannot be mixed up
 * by accident.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
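/*
 * pgtable_t is a pte_t pointer rather than a struct page pointer
 * because s390 page tables are 2 KB and two of them are packed into
 * one 4 KB page, so a struct page alone could not identify a table.
 */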

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define pgd_val(x)      ((x).pgd)

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

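/*
 * Set the storage key for the 4 KB page at addr. If the page is not
 * mapped anywhere, no TLB can hold an entry for it, so the .insn form
 * encodes SSKE (opcode 0xb22b) with the nonquiescing (NQ) control in
 * the M3 field and skips the system-wide quiescing of a plain SSKE.
 */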
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

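/*
 * Read the storage key for the 4 KB page at addr using ISKE
 * (insert storage key extended).
 */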
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

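/*
 * Reset the referenced bit of the page at addr with RRBE (reset
 * reference bit extended). RRBE reports the old referenced/changed
 * state in the condition code, and IPM copies the condition code into
 * bits 2-3 of the result; the 0x20000000 test therefore returns
 * whether the referenced bit was set before the reset.
 */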
static inline int page_reset_referenced(unsigned long addr)
{
	unsigned int ipm;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		: "=d" (ipm) : "a" (addr) : "cc");
	return !!(ipm & 0x20000000);
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/

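/*
 * Page allocator hooks used by the s390 page-state handling
 * (arch/s390/mm/page-states.c) to tell the hypervisor which pages are
 * in use, e.g. for guest page hinting under z/VM.
 */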
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_states(int make_stable);

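/*
 * /dev/mem access to physical memory is never allowed on s390:
 * devmem_is_allowed() rejects every page frame.
 */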
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#endif /* !__ASSEMBLY__ */

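/*
 * The kernel runs with an identity mapping of virtual to physical
 * addresses, so PAGE_OFFSET is 0 and __pa()/__va() reduce to casts.
 */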
#define __PAGE_OFFSET           0x0UL
#define PAGE_OFFSET             0x0UL
#define __pa(x)                 (unsigned long)(x)
#define __va(x)                 (void *)(unsigned long)(x)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

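/*
 * Default vm_flags for data mappings: readable and writable, allowed
 * to become executable via mprotect() (VM_MAYEXEC), but without
 * VM_EXEC, i.e. not executable by default.
 */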
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */